Merge "Revert experimental lambda feature."
diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk
index df7df26..93e310e 100644
--- a/build/Android.common_test.mk
+++ b/build/Android.common_test.mk
@@ -57,6 +57,9 @@
# Do you want optimizing compiler tests run?
ART_TEST_OPTIMIZING ?= true
+# Do you want to test the optimizing compiler with graph coloring register allocation?
+ART_TEST_OPTIMIZING_GRAPH_COLOR ?= $(ART_TEST_FULL)
+
# Do we want to test a PIC-compiled core image?
ART_TEST_PIC_IMAGE ?= $(ART_TEST_FULL)
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 8261a87..0ede30d 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -67,8 +67,9 @@
optimizing/parallel_move_resolver.cc \
optimizing/prepare_for_register_allocation.cc \
optimizing/reference_type_propagation.cc \
- optimizing/register_allocator.cc \
optimizing/register_allocation_resolver.cc \
+ optimizing/register_allocator.cc \
+ optimizing/register_allocator_graph_color.cc \
optimizing/register_allocator_linear_scan.cc \
optimizing/select_generator.cc \
optimizing/sharpening.cc \
@@ -80,6 +81,7 @@
optimizing/x86_memory_gen.cc \
trampolines/trampoline_compiler.cc \
utils/assembler.cc \
+ utils/jni_macro_assembler.cc \
utils/swap_space.cc \
compiler.cc \
elf_writer.cc \
@@ -96,6 +98,7 @@
utils/arm/assembler_arm.cc \
utils/arm/assembler_arm32.cc \
utils/arm/assembler_thumb2.cc \
+ utils/arm/jni_macro_assembler_arm.cc \
utils/arm/managed_register_arm.cc \
# TODO We should really separate out those files that are actually needed for both variants of an
@@ -112,6 +115,7 @@
optimizing/instruction_simplifier_shared.cc \
optimizing/intrinsics_arm64.cc \
utils/arm64/assembler_arm64.cc \
+ utils/arm64/jni_macro_assembler_arm64.cc \
utils/arm64/managed_register_arm64.cc \
LIBART_COMPILER_SRC_FILES_mips := \
@@ -141,6 +145,7 @@
optimizing/intrinsics_x86.cc \
optimizing/pc_relative_fixups_x86.cc \
utils/x86/assembler_x86.cc \
+ utils/x86/jni_macro_assembler_x86.cc \
utils/x86/managed_register_x86.cc \
LIBART_COMPILER_SRC_FILES_x86_64 := \
@@ -150,6 +155,7 @@
optimizing/intrinsics_x86_64.cc \
optimizing/code_generator_x86_64.cc \
utils/x86_64/assembler_x86_64.cc \
+ utils/x86_64/jni_macro_assembler_x86_64.cc \
utils/x86_64/managed_register_x86_64.cc \
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index f20dba3..30ba8c9 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -44,7 +44,9 @@
init_failure_output_(nullptr),
dump_cfg_file_name_(""),
dump_cfg_append_(false),
- force_determinism_(false) {
+ force_determinism_(false),
+ register_allocation_strategy_(RegisterAllocator::kRegisterAllocatorDefault),
+ passes_to_run_(nullptr) {
}
CompilerOptions::~CompilerOptions() {
@@ -74,7 +76,9 @@
bool abort_on_hard_verifier_failure,
const std::string& dump_cfg_file_name,
bool dump_cfg_append,
- bool force_determinism
+ bool force_determinism,
+ RegisterAllocator::Strategy regalloc_strategy,
+ const std::vector<std::string>* passes_to_run
) : // NOLINT(whitespace/parens)
compiler_filter_(compiler_filter),
huge_method_threshold_(huge_method_threshold),
@@ -99,7 +103,9 @@
init_failure_output_(init_failure_output),
dump_cfg_file_name_(dump_cfg_file_name),
dump_cfg_append_(dump_cfg_append),
- force_determinism_(force_determinism) {
+ force_determinism_(force_determinism),
+ register_allocation_strategy_(regalloc_strategy),
+ passes_to_run_(passes_to_run) {
}
void CompilerOptions::ParseHugeMethodMax(const StringPiece& option, UsageFn Usage) {
@@ -144,6 +150,19 @@
}
}
+void CompilerOptions::ParseRegisterAllocationStrategy(const StringPiece& option,
+ UsageFn Usage) {
+ DCHECK(option.starts_with("--register-allocation-strategy="));
+ StringPiece choice = option.substr(strlen("--register-allocation-strategy=")).data();
+ if (choice == "linear-scan") {
+ register_allocation_strategy_ = RegisterAllocator::Strategy::kRegisterAllocatorLinearScan;
+ } else if (choice == "graph-color") {
+ register_allocation_strategy_ = RegisterAllocator::Strategy::kRegisterAllocatorGraphColor;
+ } else {
+ Usage("Unrecognized register allocation strategy. Try linear-scan, or graph-color.");
+ }
+}
+
bool CompilerOptions::ParseCompilerOption(const StringPiece& option, UsageFn Usage) {
if (option.starts_with("--compiler-filter=")) {
const char* compiler_filter_string = option.substr(strlen("--compiler-filter=")).data();
@@ -190,6 +209,8 @@
dump_cfg_file_name_ = option.substr(strlen("--dump-cfg=")).data();
} else if (option.starts_with("--dump-cfg-append")) {
dump_cfg_append_ = true;
+ } else if (option.starts_with("--register-allocation-strategy=")) {
+ ParseRegisterAllocationStrategy(option, Usage);
} else {
// Option not recognized.
return false;
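
The strategy parsed above is only stored on CompilerOptions; the optimizing compiler is expected to dispatch on it when it builds the register allocator. A self-contained sketch of that dispatch under assumed names (the stand-in types below mirror identifiers in this diff, but none of this code is taken from it):

    #include <memory>
    #include <stdexcept>

    // Stand-ins for the ART classes; only the dispatch shape matters here.
    struct RegisterAllocator {
      enum Strategy { kRegisterAllocatorLinearScan, kRegisterAllocatorGraphColor };
      virtual ~RegisterAllocator() = default;
    };
    struct RegisterAllocatorLinearScan : RegisterAllocator {};
    struct RegisterAllocatorGraphColor : RegisterAllocator {};

    // Hypothetical factory: the parsed strategy picks the implementation.
    std::unique_ptr<RegisterAllocator> Create(RegisterAllocator::Strategy strategy) {
      switch (strategy) {
        case RegisterAllocator::kRegisterAllocatorLinearScan:
          return std::make_unique<RegisterAllocatorLinearScan>();
        case RegisterAllocator::kRegisterAllocatorGraphColor:
          return std::make_unique<RegisterAllocatorGraphColor>();
      }
      throw std::invalid_argument("unknown register allocation strategy");
    }

    int main() {
      std::unique_ptr<RegisterAllocator> ra =
          Create(RegisterAllocator::kRegisterAllocatorGraphColor);
      (void)ra;  // a real caller would run allocation here
    }

Keeping the strategy as an enum on CompilerOptions leaves the flag parsing above decoupled from the allocator implementations.
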
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 60b700a..abc58d7 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -24,6 +24,7 @@
#include "base/macros.h"
#include "compiler_filter.h"
#include "globals.h"
+#include "optimizing/register_allocator.h"
#include "utils.h"
namespace art {
@@ -74,7 +75,9 @@
bool abort_on_hard_verifier_failure,
const std::string& dump_cfg_file_name,
bool dump_cfg_append,
- bool force_determinism);
+ bool force_determinism,
+ RegisterAllocator::Strategy regalloc_strategy,
+ const std::vector<std::string>* passes_to_run);
CompilerFilter::Filter GetCompilerFilter() const {
return compiler_filter_;
@@ -244,6 +247,14 @@
return force_determinism_;
}
+ RegisterAllocator::Strategy GetRegisterAllocationStrategy() const {
+ return register_allocation_strategy_;
+ }
+
+ const std::vector<std::string>* GetPassesToRun() const {
+ return passes_to_run_;
+ }
+
private:
void ParseDumpInitFailures(const StringPiece& option, UsageFn Usage);
void ParseDumpCfgPasses(const StringPiece& option, UsageFn Usage);
@@ -254,6 +265,7 @@
void ParseSmallMethodMax(const StringPiece& option, UsageFn Usage);
void ParseLargeMethodMax(const StringPiece& option, UsageFn Usage);
void ParseHugeMethodMax(const StringPiece& option, UsageFn Usage);
+ void ParseRegisterAllocationStrategy(const StringPiece& option, UsageFn Usage);
CompilerFilter::Filter compiler_filter_;
size_t huge_method_threshold_;
@@ -297,6 +309,16 @@
// outcomes.
bool force_determinism_;
+ RegisterAllocator::Strategy register_allocation_strategy_;
+
+ // If not null, specifies optimization passes which will be run instead of the defaults.
+ // Note that passes_to_run_ is not checked for correctness and providing an incorrect
+ // list of passes can lead to unexpected compiler behavior. This is caused by dependencies
+ // between passes. Failing to satisfy them can, for example, lead to compiler crashes.
+ // Passing pass names which are not recognized by the compiler will result in
+ // compiler-dependent behavior.
+ const std::vector<std::string>* passes_to_run_;
+
friend class Dex2Oat;
DISALLOW_COPY_AND_ASSIGN(CompilerOptions);
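
As the comment above warns, passes_to_run_ is a wholesale replacement for the default pipeline, not a filter on it. A hedged sketch of how a driver might consume the pointer (RunOptimization, RunPasses, and the default pass names are invented for illustration):

    #include <iostream>
    #include <string>
    #include <vector>

    // Stand-in for running one named optimization pass. As the header comment
    // says, nothing validates the name or inter-pass dependencies up front.
    void RunOptimization(const std::string& pass_name) {
      std::cout << "running pass: " << pass_name << '\n';
    }

    void RunPasses(const std::vector<std::string>* passes_to_run) {
      static const std::vector<std::string> kDefaults = {"inliner", "GVN", "licm"};
      // Full replacement: a non-null list means the defaults are skipped entirely.
      const std::vector<std::string>& passes =
          (passes_to_run != nullptr) ? *passes_to_run : kDefaults;
      for (const std::string& name : passes) {
        RunOptimization(name);
      }
    }

    int main() {
      std::vector<std::string> custom = {"GVN"};  // runs only GVN, nothing else
      RunPasses(&custom);
    }
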
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 7a34683..7c87a60 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -39,6 +39,7 @@
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/accounting/space_bitmap-inl.h"
+#include "gc/collector/concurrent_copying.h"
#include "gc/heap.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
@@ -1377,6 +1378,8 @@
runtime->GetCalleeSaveMethod(Runtime::kRefsOnly);
image_methods_[ImageHeader::kRefsAndArgsSaveMethod] =
runtime->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
+ image_methods_[ImageHeader::kSaveEverythingMethod] =
+ runtime->GetCalleeSaveMethod(Runtime::kSaveEverything);
// Visit image methods first to have the main runtime methods in the first image.
for (auto* m : image_methods_) {
CHECK(m != nullptr);
@@ -1823,6 +1826,11 @@
const auto it = saved_hashcode_map_.find(obj);
dst->SetLockWord(it != saved_hashcode_map_.end() ?
LockWord::FromHashCode(it->second, 0u) : LockWord::Default(), false);
+ if (kUseBakerReadBarrier && gc::collector::ConcurrentCopying::kGrayDirtyImmuneObjects) {
+ // Treat all of the objects in the image as marked to avoid unnecessary dirty pages. This is
+ // safe since we mark all of the objects that may reference non-immune objects as gray.
+ CHECK(dst->AtomicSetMarkBit(0, 1));
+ }
FixupObject(obj, dst);
}
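
AtomicSetMarkBit(0, 1) compare-and-swaps a single lock-word bit from 0 to 1; pre-marking every image object this way keeps the concurrent copying collector from dirtying otherwise-clean image pages merely to record marks. A self-contained sketch of a CAS on one bit, with an assumed bit position rather than ART's actual lock-word layout:

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    constexpr uint32_t kMarkBit = 1u << 31;  // assumed position of the mark bit

    // Stand-in for Object::AtomicSetMarkBit(0, 1): set the bit iff it was clear.
    bool AtomicSetMarkBit(std::atomic<uint32_t>& lock_word) {
      uint32_t old_word = lock_word.load(std::memory_order_relaxed);
      do {
        if ((old_word & kMarkBit) != 0u) {
          return false;  // the expected value (0) did not match
        }
      } while (!lock_word.compare_exchange_weak(old_word, old_word | kMarkBit));
      return true;
    }

    int main() {
      std::atomic<uint32_t> lock_word{0u};
      assert(AtomicSetMarkBit(lock_word));   // first set succeeds, as the CHECK above expects
      assert(!AtomicSetMarkBit(lock_word));  // already marked
      return 0;
    }
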
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index 626a975..7d13656 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -217,8 +217,7 @@
// uint32 = typeof(lockword_)
// Subtract read barrier bits since we want these to remain 0, or else it may result in DCHECK
// failures due to invalid read barrier bits during object field reads.
- static const size_t kBinShift = BitSizeOf<uint32_t>() - kBinBits -
- LockWord::kReadBarrierStateSize;
+ static const size_t kBinShift = BitSizeOf<uint32_t>() - kBinBits - LockWord::kGCStateSize;
// 111000.....0
static const size_t kBinMask = ((static_cast<size_t>(1) << kBinBits) - 1) << kBinShift;
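
The bin field sits in the top bits of the 32-bit lock word, just below the bits reserved for the GC, so switching the reservation from kReadBarrierStateSize to kGCStateSize only moves the shift. A worked example with assumed bit widths (the kBinBits and kGCStateSize values are not shown in this diff):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const unsigned kBinBits = 3u;      // assumed
      const unsigned kGCStateSize = 2u;  // assumed; replaces kReadBarrierStateSize
      const unsigned kBinShift = 32u - kBinBits - kGCStateSize;                 // 27
      const uint32_t kBinMask = ((UINT32_C(1) << kBinBits) - 1u) << kBinShift;  // 0x38000000
      std::printf("kBinShift=%u kBinMask=0x%08X\n",
                  kBinShift, static_cast<unsigned>(kBinMask));
      return 0;
    }
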
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 1785338..6f6a8f5 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -32,6 +32,7 @@
#include "oat_file-inl.h"
#include "oat_quick_method_header.h"
#include "object_lock.h"
+#include "optimizing/register_allocator.h"
#include "thread_list.h"
namespace art {
@@ -110,7 +111,9 @@
/* abort_on_hard_verifier_failure */ false,
/* dump_cfg_file_name */ "",
/* dump_cfg_append */ false,
- /* force_determinism */ false));
+ /* force_determinism */ false,
+ RegisterAllocator::kRegisterAllocatorDefault,
+ /* passes_to_run */ nullptr));
for (const std::string& argument : Runtime::Current()->GetCompilerOptions()) {
compiler_options_->ParseCompilerOption(argument, Usage);
}
diff --git a/compiler/jni/jni_cfi_test.cc b/compiler/jni/jni_cfi_test.cc
index 3526802..524ce4d 100644
--- a/compiler/jni/jni_cfi_test.cc
+++ b/compiler/jni/jni_cfi_test.cc
@@ -19,10 +19,12 @@
#include "arch/instruction_set.h"
#include "base/arena_allocator.h"
+#include "base/enums.h"
#include "cfi_test.h"
#include "gtest/gtest.h"
#include "jni/quick/calling_convention.h"
#include "utils/assembler.h"
+#include "utils/jni_macro_assembler.h"
#include "jni/jni_cfi_test_expected.inc"
@@ -36,9 +38,23 @@
// Enable this flag to generate the expected outputs.
static constexpr bool kGenerateExpected = false;
- void TestImpl(InstructionSet isa, const char* isa_str,
+ void TestImpl(InstructionSet isa,
+ const char* isa_str,
const std::vector<uint8_t>& expected_asm,
const std::vector<uint8_t>& expected_cfi) {
+ if (Is64BitInstructionSet(isa)) {
+ TestImplSized<PointerSize::k64>(isa, isa_str, expected_asm, expected_cfi);
+ } else {
+ TestImplSized<PointerSize::k32>(isa, isa_str, expected_asm, expected_cfi);
+ }
+ }
+
+ private:
+ template <PointerSize kPointerSize>
+ void TestImplSized(InstructionSet isa,
+ const char* isa_str,
+ const std::vector<uint8_t>& expected_asm,
+ const std::vector<uint8_t>& expected_cfi) {
// Description of simple method.
const bool is_static = true;
const bool is_synchronized = false;
@@ -55,7 +71,8 @@
ArrayRef<const ManagedRegister> callee_save_regs = jni_conv->CalleeSaveRegisters();
// Assemble the method.
- std::unique_ptr<Assembler> jni_asm(Assembler::Create(&arena, isa));
+ std::unique_ptr<JNIMacroAssembler<kPointerSize>> jni_asm(
+ JNIMacroAssembler<kPointerSize>::Create(&arena, isa));
jni_asm->cfi().SetEnabled(true);
jni_asm->BuildFrame(frame_size, mr_conv->MethodRegister(),
callee_save_regs, mr_conv->EntrySpills());
diff --git a/compiler/jni/jni_cfi_test_expected.inc b/compiler/jni/jni_cfi_test_expected.inc
index 16b4386..da72c75 100644
--- a/compiler/jni/jni_cfi_test_expected.inc
+++ b/compiler/jni/jni_cfi_test_expected.inc
@@ -1,8 +1,7 @@
static constexpr uint8_t expected_asm_kThumb2[] = {
0x2D, 0xE9, 0xE0, 0x4D, 0x2D, 0xED, 0x10, 0x8A, 0x89, 0xB0, 0x00, 0x90,
- 0xCD, 0xF8, 0x84, 0x10, 0x8D, 0xED, 0x22, 0x0A, 0xCD, 0xF8, 0x8C, 0x20,
- 0xCD, 0xF8, 0x90, 0x30, 0x88, 0xB0, 0x08, 0xB0, 0x09, 0xB0, 0xBD, 0xEC,
- 0x10, 0x8A, 0xBD, 0xE8, 0xE0, 0x8D,
+ 0x21, 0x91, 0x8D, 0xED, 0x22, 0x0A, 0x23, 0x92, 0x24, 0x93, 0x88, 0xB0,
+ 0x08, 0xB0, 0x09, 0xB0, 0xBD, 0xEC, 0x10, 0x8A, 0xBD, 0xE8, 0xE0, 0x8D,
};
static constexpr uint8_t expected_cfi_kThumb2[] = {
0x44, 0x0E, 0x1C, 0x85, 0x07, 0x86, 0x06, 0x87, 0x05, 0x88, 0x04, 0x8A,
@@ -11,7 +10,7 @@
0x55, 0x12, 0x05, 0x56, 0x11, 0x05, 0x57, 0x10, 0x05, 0x58, 0x0F, 0x05,
0x59, 0x0E, 0x05, 0x5A, 0x0D, 0x05, 0x5B, 0x0C, 0x05, 0x5C, 0x0B, 0x05,
0x5D, 0x0A, 0x05, 0x5E, 0x09, 0x05, 0x5F, 0x08, 0x42, 0x0E, 0x80, 0x01,
- 0x54, 0x0E, 0xA0, 0x01, 0x42, 0x0E, 0x80, 0x01, 0x0A, 0x42, 0x0E, 0x5C,
+ 0x4E, 0x0E, 0xA0, 0x01, 0x42, 0x0E, 0x80, 0x01, 0x0A, 0x42, 0x0E, 0x5C,
0x44, 0x0E, 0x1C, 0x06, 0x50, 0x06, 0x51, 0x06, 0x52, 0x06, 0x53, 0x06,
0x54, 0x06, 0x55, 0x06, 0x56, 0x06, 0x57, 0x06, 0x58, 0x06, 0x59, 0x06,
0x5A, 0x06, 0x5B, 0x06, 0x5C, 0x06, 0x5D, 0x06, 0x5E, 0x06, 0x5F, 0x44,
@@ -47,38 +46,38 @@
// 0x00000008: sub sp, sp, #36
// 0x0000000a: .cfi_def_cfa_offset: 128
// 0x0000000a: str r0, [sp, #0]
-// 0x0000000c: str.w r1, [sp, #132]
-// 0x00000010: vstr.f32 s0, [sp, #136]
-// 0x00000014: str.w r2, [sp, #140]
-// 0x00000018: str.w r3, [sp, #144]
-// 0x0000001c: sub sp, sp, #32
-// 0x0000001e: .cfi_def_cfa_offset: 160
-// 0x0000001e: add sp, sp, #32
-// 0x00000020: .cfi_def_cfa_offset: 128
-// 0x00000020: .cfi_remember_state
-// 0x00000020: add sp, sp, #36
-// 0x00000022: .cfi_def_cfa_offset: 92
-// 0x00000022: vpop.f32 {s16-s31}
-// 0x00000026: .cfi_def_cfa_offset: 28
-// 0x00000026: .cfi_restore_extended: r80
-// 0x00000026: .cfi_restore_extended: r81
-// 0x00000026: .cfi_restore_extended: r82
-// 0x00000026: .cfi_restore_extended: r83
-// 0x00000026: .cfi_restore_extended: r84
-// 0x00000026: .cfi_restore_extended: r85
-// 0x00000026: .cfi_restore_extended: r86
-// 0x00000026: .cfi_restore_extended: r87
-// 0x00000026: .cfi_restore_extended: r88
-// 0x00000026: .cfi_restore_extended: r89
-// 0x00000026: .cfi_restore_extended: r90
-// 0x00000026: .cfi_restore_extended: r91
-// 0x00000026: .cfi_restore_extended: r92
-// 0x00000026: .cfi_restore_extended: r93
-// 0x00000026: .cfi_restore_extended: r94
-// 0x00000026: .cfi_restore_extended: r95
-// 0x00000026: pop {r5, r6, r7, r8, r10, r11, pc}
-// 0x0000002a: .cfi_restore_state
-// 0x0000002a: .cfi_def_cfa_offset: 128
+// 0x0000000c: str r1, [sp, #132]
+// 0x0000000e: vstr.f32 s0, [sp, #136]
+// 0x00000012: str r2, [sp, #140]
+// 0x00000014: str r3, [sp, #144]
+// 0x00000016: sub sp, sp, #32
+// 0x00000018: .cfi_def_cfa_offset: 160
+// 0x00000018: add sp, sp, #32
+// 0x0000001a: .cfi_def_cfa_offset: 128
+// 0x0000001a: .cfi_remember_state
+// 0x0000001a: add sp, sp, #36
+// 0x0000001c: .cfi_def_cfa_offset: 92
+// 0x0000001c: vpop.f32 {s16-s31}
+// 0x00000020: .cfi_def_cfa_offset: 28
+// 0x00000020: .cfi_restore_extended: r80
+// 0x00000020: .cfi_restore_extended: r81
+// 0x00000020: .cfi_restore_extended: r82
+// 0x00000020: .cfi_restore_extended: r83
+// 0x00000020: .cfi_restore_extended: r84
+// 0x00000020: .cfi_restore_extended: r85
+// 0x00000020: .cfi_restore_extended: r86
+// 0x00000020: .cfi_restore_extended: r87
+// 0x00000020: .cfi_restore_extended: r88
+// 0x00000020: .cfi_restore_extended: r89
+// 0x00000020: .cfi_restore_extended: r90
+// 0x00000020: .cfi_restore_extended: r91
+// 0x00000020: .cfi_restore_extended: r92
+// 0x00000020: .cfi_restore_extended: r93
+// 0x00000020: .cfi_restore_extended: r94
+// 0x00000020: .cfi_restore_extended: r95
+// 0x00000020: pop {r5, r6, r7, r8, r10, r11, pc}
+// 0x00000024: .cfi_restore_state
+// 0x00000024: .cfi_def_cfa_offset: 128
static constexpr uint8_t expected_asm_kArm64[] = {
0xFF, 0x03, 0x03, 0xD1, 0xF3, 0x53, 0x06, 0xA9, 0xF5, 0x5B, 0x07, 0xA9,
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 277b794..f99f6a8 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -26,6 +26,7 @@
#include "base/enums.h"
#include "base/logging.h"
#include "base/macros.h"
+#include "memory_region.h"
#include "calling_convention.h"
#include "class_linker.h"
#include "compiled_method.h"
@@ -34,7 +35,9 @@
#include "driver/compiler_options.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "jni_env_ext.h"
+#include "debug/dwarf/debug_frame_opcode_writer.h"
#include "utils/assembler.h"
+#include "utils/jni_macro_assembler.h"
#include "utils/managed_register.h"
#include "utils/arm/managed_register_arm.h"
#include "utils/arm64/managed_register_arm64.h"
@@ -47,22 +50,32 @@
namespace art {
-static void CopyParameter(Assembler* jni_asm,
+template <PointerSize kPointerSize>
+static void CopyParameter(JNIMacroAssembler<kPointerSize>* jni_asm,
ManagedRuntimeCallingConvention* mr_conv,
JniCallingConvention* jni_conv,
size_t frame_size, size_t out_arg_size);
-static void SetNativeParameter(Assembler* jni_asm,
+template <PointerSize kPointerSize>
+static void SetNativeParameter(JNIMacroAssembler<kPointerSize>* jni_asm,
JniCallingConvention* jni_conv,
ManagedRegister in_reg);
+template <PointerSize kPointerSize>
+static std::unique_ptr<JNIMacroAssembler<kPointerSize>> GetMacroAssembler(
+ ArenaAllocator* arena, InstructionSet isa, const InstructionSetFeatures* features) {
+ return JNIMacroAssembler<kPointerSize>::Create(arena, isa, features);
+}
+
// Generate the JNI bridge for the given method, general contract:
// - Arguments are in the managed runtime format, either on stack or in
// registers, a reference to the method object is supplied as part of this
// convention.
//
-CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
- uint32_t access_flags, uint32_t method_idx,
- const DexFile& dex_file) {
+template <PointerSize kPointerSize>
+static CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
+ uint32_t access_flags,
+ uint32_t method_idx,
+ const DexFile& dex_file) {
const bool is_native = (access_flags & kAccNative) != 0;
CHECK(is_native);
const bool is_static = (access_flags & kAccStatic) != 0;
@@ -70,7 +83,6 @@
const char* shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(method_idx));
InstructionSet instruction_set = driver->GetInstructionSet();
const InstructionSetFeatures* instruction_set_features = driver->GetInstructionSetFeatures();
- const bool is_64_bit_target = Is64BitInstructionSet(instruction_set);
ArenaPool pool;
ArenaAllocator arena(&pool);
@@ -101,8 +113,8 @@
&arena, is_static, is_synchronized, jni_end_shorty, instruction_set));
// Assembler that holds generated instructions
- std::unique_ptr<Assembler> jni_asm(
- Assembler::Create(&arena, instruction_set, instruction_set_features));
+ std::unique_ptr<JNIMacroAssembler<kPointerSize>> jni_asm =
+ GetMacroAssembler<kPointerSize>(&arena, instruction_set, instruction_set_features);
jni_asm->cfi().SetEnabled(driver->GetCompilerOptions().GenerateAnyDebugInfo());
// Offsets into data structures
@@ -124,21 +136,12 @@
main_jni_conv->ReferenceCount(),
mr_conv->InterproceduralScratchRegister());
- if (is_64_bit_target) {
- __ CopyRawPtrFromThread64(main_jni_conv->HandleScopeLinkOffset(),
- Thread::TopHandleScopeOffset<PointerSize::k64>(),
+ __ CopyRawPtrFromThread(main_jni_conv->HandleScopeLinkOffset(),
+ Thread::TopHandleScopeOffset<kPointerSize>(),
+ mr_conv->InterproceduralScratchRegister());
+ __ StoreStackOffsetToThread(Thread::TopHandleScopeOffset<kPointerSize>(),
+ main_jni_conv->HandleScopeOffset(),
mr_conv->InterproceduralScratchRegister());
- __ StoreStackOffsetToThread64(Thread::TopHandleScopeOffset<PointerSize::k64>(),
- main_jni_conv->HandleScopeOffset(),
- mr_conv->InterproceduralScratchRegister());
- } else {
- __ CopyRawPtrFromThread32(main_jni_conv->HandleScopeLinkOffset(),
- Thread::TopHandleScopeOffset<PointerSize::k32>(),
- mr_conv->InterproceduralScratchRegister());
- __ StoreStackOffsetToThread32(Thread::TopHandleScopeOffset<PointerSize::k32>(),
- main_jni_conv->HandleScopeOffset(),
- mr_conv->InterproceduralScratchRegister());
- }
// 3. Place incoming reference arguments into handle scope
main_jni_conv->Next(); // Skip JNIEnv*
@@ -188,11 +191,7 @@
}
// 4. Write out the end of the quick frames.
- if (is_64_bit_target) {
- __ StoreStackPointerToThread64(Thread::TopOfManagedStackOffset<PointerSize::k64>());
- } else {
- __ StoreStackPointerToThread32(Thread::TopOfManagedStackOffset<PointerSize::k32>());
- }
+ __ StoreStackPointerToThread(Thread::TopOfManagedStackOffset<kPointerSize>());
// 5. Move frame down to allow space for out going args.
const size_t main_out_arg_size = main_jni_conv->OutArgSize();
@@ -202,10 +201,8 @@
// Call the read barrier for the declaring class loaded from the method for a static call.
// Note that we always have outgoing param space available for at least two params.
if (kUseReadBarrier && is_static) {
- ThreadOffset32 read_barrier32 =
- QUICK_ENTRYPOINT_OFFSET(PointerSize::k32, pReadBarrierJni);
- ThreadOffset64 read_barrier64 =
- QUICK_ENTRYPOINT_OFFSET(PointerSize::k64, pReadBarrierJni);
+ ThreadOffset<kPointerSize> read_barrier = QUICK_ENTRYPOINT_OFFSET(kPointerSize,
+ pReadBarrierJni);
main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
main_jni_conv->Next(); // Skip JNIEnv.
FrameOffset class_handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
@@ -225,21 +222,13 @@
// Pass the current thread as the second argument and call.
if (main_jni_conv->IsCurrentParamInRegister()) {
__ GetCurrentThread(main_jni_conv->CurrentParamRegister());
- if (is_64_bit_target) {
- __ Call(main_jni_conv->CurrentParamRegister(), Offset(read_barrier64),
- main_jni_conv->InterproceduralScratchRegister());
- } else {
- __ Call(main_jni_conv->CurrentParamRegister(), Offset(read_barrier32),
- main_jni_conv->InterproceduralScratchRegister());
- }
+ __ Call(main_jni_conv->CurrentParamRegister(),
+ Offset(read_barrier),
+ main_jni_conv->InterproceduralScratchRegister());
} else {
__ GetCurrentThread(main_jni_conv->CurrentParamStackOffset(),
main_jni_conv->InterproceduralScratchRegister());
- if (is_64_bit_target) {
- __ CallFromThread64(read_barrier64, main_jni_conv->InterproceduralScratchRegister());
- } else {
- __ CallFromThread32(read_barrier32, main_jni_conv->InterproceduralScratchRegister());
- }
+ __ CallFromThread(read_barrier, main_jni_conv->InterproceduralScratchRegister());
}
main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size)); // Reset.
}
@@ -248,14 +237,10 @@
// can occur. The result is the saved JNI local state that is restored by the exit call. We
// abuse the JNI calling convention here, that is guaranteed to support passing 2 pointer
// arguments.
- ThreadOffset32 jni_start32 =
+ ThreadOffset<kPointerSize> jni_start =
is_synchronized
- ? QUICK_ENTRYPOINT_OFFSET(PointerSize::k32, pJniMethodStartSynchronized)
- : QUICK_ENTRYPOINT_OFFSET(PointerSize::k32, pJniMethodStart);
- ThreadOffset64 jni_start64 =
- is_synchronized
- ? QUICK_ENTRYPOINT_OFFSET(PointerSize::k64, pJniMethodStartSynchronized)
- : QUICK_ENTRYPOINT_OFFSET(PointerSize::k64, pJniMethodStart);
+ ? QUICK_ENTRYPOINT_OFFSET(kPointerSize, pJniMethodStartSynchronized)
+ : QUICK_ENTRYPOINT_OFFSET(kPointerSize, pJniMethodStart);
main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
FrameOffset locked_object_handle_scope_offset(0);
if (is_synchronized) {
@@ -276,21 +261,13 @@
}
if (main_jni_conv->IsCurrentParamInRegister()) {
__ GetCurrentThread(main_jni_conv->CurrentParamRegister());
- if (is_64_bit_target) {
- __ Call(main_jni_conv->CurrentParamRegister(), Offset(jni_start64),
- main_jni_conv->InterproceduralScratchRegister());
- } else {
- __ Call(main_jni_conv->CurrentParamRegister(), Offset(jni_start32),
- main_jni_conv->InterproceduralScratchRegister());
- }
+ __ Call(main_jni_conv->CurrentParamRegister(),
+ Offset(jni_start),
+ main_jni_conv->InterproceduralScratchRegister());
} else {
__ GetCurrentThread(main_jni_conv->CurrentParamStackOffset(),
main_jni_conv->InterproceduralScratchRegister());
- if (is_64_bit_target) {
- __ CallFromThread64(jni_start64, main_jni_conv->InterproceduralScratchRegister());
- } else {
- __ CallFromThread32(jni_start32, main_jni_conv->InterproceduralScratchRegister());
- }
+ __ CallFromThread(jni_start, main_jni_conv->InterproceduralScratchRegister());
}
if (is_synchronized) { // Check for exceptions from monitor enter.
__ ExceptionPoll(main_jni_conv->InterproceduralScratchRegister(), main_out_arg_size);
@@ -352,20 +329,12 @@
if (main_jni_conv->IsCurrentParamInRegister()) {
ManagedRegister jni_env = main_jni_conv->CurrentParamRegister();
DCHECK(!jni_env.Equals(main_jni_conv->InterproceduralScratchRegister()));
- if (is_64_bit_target) {
- __ LoadRawPtrFromThread64(jni_env, Thread::JniEnvOffset<PointerSize::k64>());
- } else {
- __ LoadRawPtrFromThread32(jni_env, Thread::JniEnvOffset<PointerSize::k32>());
- }
+ __ LoadRawPtrFromThread(jni_env, Thread::JniEnvOffset<kPointerSize>());
} else {
FrameOffset jni_env = main_jni_conv->CurrentParamStackOffset();
- if (is_64_bit_target) {
- __ CopyRawPtrFromThread64(jni_env, Thread::JniEnvOffset<PointerSize::k64>(),
- main_jni_conv->InterproceduralScratchRegister());
- } else {
- __ CopyRawPtrFromThread32(jni_env, Thread::JniEnvOffset<PointerSize::k32>(),
- main_jni_conv->InterproceduralScratchRegister());
- }
+ __ CopyRawPtrFromThread(jni_env,
+ Thread::JniEnvOffset<kPointerSize>(),
+ main_jni_conv->InterproceduralScratchRegister());
}
// 9. Plant call to native code associated with method.
@@ -398,7 +367,9 @@
+ static_cast<size_t>(kMipsPointerSize));
}
CHECK_LT(return_save_location.Uint32Value(), frame_size + main_out_arg_size);
- __ Store(return_save_location, main_jni_conv->ReturnRegister(), main_jni_conv->SizeOfReturnValue());
+ __ Store(return_save_location,
+ main_jni_conv->ReturnRegister(),
+ main_jni_conv->SizeOfReturnValue());
}
// Increase frame size for out args if needed by the end_jni_conv.
@@ -414,27 +385,18 @@
}
// thread.
end_jni_conv->ResetIterator(FrameOffset(end_out_arg_size));
- ThreadOffset32 jni_end32(-1);
- ThreadOffset64 jni_end64(-1);
+ ThreadOffset<kPointerSize> jni_end(-1);
if (reference_return) {
// Pass result.
- jni_end32 = is_synchronized
- ? QUICK_ENTRYPOINT_OFFSET(PointerSize::k32,
- pJniMethodEndWithReferenceSynchronized)
- : QUICK_ENTRYPOINT_OFFSET(PointerSize::k32, pJniMethodEndWithReference);
- jni_end64 = is_synchronized
- ? QUICK_ENTRYPOINT_OFFSET(PointerSize::k64,
- pJniMethodEndWithReferenceSynchronized)
- : QUICK_ENTRYPOINT_OFFSET(PointerSize::k64, pJniMethodEndWithReference);
+ jni_end = is_synchronized
+ ? QUICK_ENTRYPOINT_OFFSET(kPointerSize, pJniMethodEndWithReferenceSynchronized)
+ : QUICK_ENTRYPOINT_OFFSET(kPointerSize, pJniMethodEndWithReference);
SetNativeParameter(jni_asm.get(), end_jni_conv.get(), end_jni_conv->ReturnRegister());
end_jni_conv->Next();
} else {
- jni_end32 = is_synchronized
- ? QUICK_ENTRYPOINT_OFFSET(PointerSize::k32, pJniMethodEndSynchronized)
- : QUICK_ENTRYPOINT_OFFSET(PointerSize::k32, pJniMethodEnd);
- jni_end64 = is_synchronized
- ? QUICK_ENTRYPOINT_OFFSET(PointerSize::k64, pJniMethodEndSynchronized)
- : QUICK_ENTRYPOINT_OFFSET(PointerSize::k64, pJniMethodEnd);
+ jni_end = is_synchronized
+ ? QUICK_ENTRYPOINT_OFFSET(kPointerSize, pJniMethodEndSynchronized)
+ : QUICK_ENTRYPOINT_OFFSET(kPointerSize, pJniMethodEnd);
}
// Pass saved local reference state.
if (end_jni_conv->IsCurrentParamOnStack()) {
@@ -461,23 +423,13 @@
}
if (end_jni_conv->IsCurrentParamInRegister()) {
__ GetCurrentThread(end_jni_conv->CurrentParamRegister());
- if (is_64_bit_target) {
- __ Call(end_jni_conv->CurrentParamRegister(), Offset(jni_end64),
- end_jni_conv->InterproceduralScratchRegister());
- } else {
- __ Call(end_jni_conv->CurrentParamRegister(), Offset(jni_end32),
- end_jni_conv->InterproceduralScratchRegister());
- }
+ __ Call(end_jni_conv->CurrentParamRegister(),
+ Offset(jni_end),
+ end_jni_conv->InterproceduralScratchRegister());
} else {
__ GetCurrentThread(end_jni_conv->CurrentParamStackOffset(),
end_jni_conv->InterproceduralScratchRegister());
- if (is_64_bit_target) {
- __ CallFromThread64(ThreadOffset64(jni_end64),
- end_jni_conv->InterproceduralScratchRegister());
- } else {
- __ CallFromThread32(ThreadOffset32(jni_end32),
- end_jni_conv->InterproceduralScratchRegister());
- }
+ __ CallFromThread(jni_end, end_jni_conv->InterproceduralScratchRegister());
}
// 13. Reload return value
@@ -517,7 +469,8 @@
}
// Copy a single parameter from the managed to the JNI calling convention.
-static void CopyParameter(Assembler* jni_asm,
+template <PointerSize kPointerSize>
+static void CopyParameter(JNIMacroAssembler<kPointerSize>* jni_asm,
ManagedRuntimeCallingConvention* mr_conv,
JniCallingConvention* jni_conv,
size_t frame_size, size_t out_arg_size) {
@@ -606,7 +559,8 @@
}
}
-static void SetNativeParameter(Assembler* jni_asm,
+template <PointerSize kPointerSize>
+static void SetNativeParameter(JNIMacroAssembler<kPointerSize>* jni_asm,
JniCallingConvention* jni_conv,
ManagedRegister in_reg) {
if (jni_conv->IsCurrentParamOnStack()) {
@@ -621,7 +575,13 @@
CompiledMethod* ArtQuickJniCompileMethod(CompilerDriver* compiler, uint32_t access_flags,
uint32_t method_idx, const DexFile& dex_file) {
- return ArtJniCompileMethodInternal(compiler, access_flags, method_idx, dex_file);
+ if (Is64BitInstructionSet(compiler->GetInstructionSet())) {
+ return ArtJniCompileMethodInternal<PointerSize::k64>(
+ compiler, access_flags, method_idx, dex_file);
+ } else {
+ return ArtJniCompileMethodInternal<PointerSize::k32>(
+ compiler, access_flags, method_idx, dex_file);
+ }
}
} // namespace art
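
Every change in this file follows the same pattern: runtime is_64_bit_target branches over paired ...Thread32/...Thread64 calls collapse into a single call templated on kPointerSize, and the 32/64-bit decision is made exactly once, in ArtQuickJniCompileMethod. A minimal standalone sketch of that shape (all names invented):

    #include <cstdio>

    enum class PointerSize { k32 = 4, k64 = 8 };

    template <PointerSize kPointerSize>
    static void CompileBody() {
      // One implementation: the pointer size is a compile-time constant here,
      // so the body needs no per-call 32-vs-64 branches.
      std::printf("compiling for %d-byte pointers\n", static_cast<int>(kPointerSize));
    }

    static void Compile(bool is_64_bit) {
      // The runtime decision happens exactly once, at the entry point.
      if (is_64_bit) {
        CompileBody<PointerSize::k64>();
      } else {
        CompileBody<PointerSize::k32>();
      }
    }

    int main() {
      Compile(false);
      Compile(true);
      return 0;
    }
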
diff --git a/compiler/linker/arm/relative_patcher_arm_base.cc b/compiler/linker/arm/relative_patcher_arm_base.cc
index d4dd978..2471f79 100644
--- a/compiler/linker/arm/relative_patcher_arm_base.cc
+++ b/compiler/linker/arm/relative_patcher_arm_base.cc
@@ -31,10 +31,6 @@
}
uint32_t ArmBaseRelativePatcher::ReserveSpaceEnd(uint32_t offset) {
- // NOTE: The final thunk can be reserved from InitCodeMethodVisitor::EndClass() while it
- // may be written early by WriteCodeMethodVisitor::VisitMethod() for a deduplicated chunk
- // of code. To avoid any alignment discrepancies for the final chunk, we always align the
- // offset after reserving of writing any chunk.
uint32_t aligned_offset = CompiledMethod::AlignCode(offset, instruction_set_);
bool needs_thunk = ReserveSpaceProcessPatches(aligned_offset,
MethodReference(nullptr, 0u),
@@ -46,7 +42,7 @@
unprocessed_patches_.clear();
thunk_locations_.push_back(aligned_offset);
- offset = CompiledMethod::AlignCode(aligned_offset + thunk_code_.size(), instruction_set_);
+ offset = aligned_offset + thunk_code_.size();
}
return offset;
}
@@ -65,13 +61,7 @@
if (UNLIKELY(!WriteRelCallThunk(out, ArrayRef<const uint8_t>(thunk_code_)))) {
return 0u;
}
- uint32_t thunk_end_offset = aligned_offset + thunk_code_.size();
- // Align after writing chunk, see the ReserveSpace() above.
- offset = CompiledMethod::AlignCode(thunk_end_offset, instruction_set_);
- aligned_code_delta = offset - thunk_end_offset;
- if (aligned_code_delta != 0u && !WriteCodeAlignment(out, aligned_code_delta)) {
- return 0u;
- }
+ offset = aligned_offset + thunk_code_.size();
}
return offset;
}
@@ -92,7 +82,7 @@
MethodReference method_ref,
uint32_t max_extra_space) {
uint32_t quick_code_size = compiled_method->GetQuickCode().size();
- uint32_t quick_code_offset = compiled_method->AlignCode(offset) + sizeof(OatQuickMethodHeader);
+ uint32_t quick_code_offset = compiled_method->AlignCode(offset + sizeof(OatQuickMethodHeader));
uint32_t next_aligned_offset = compiled_method->AlignCode(quick_code_offset + quick_code_size);
// Adjust for extra space required by the subclass.
next_aligned_offset = compiled_method->AlignCode(next_aligned_offset + max_extra_space);
@@ -106,9 +96,9 @@
if (needs_thunk) {
// A single thunk will cover all pending patches.
unprocessed_patches_.clear();
- uint32_t thunk_location = compiled_method->AlignCode(offset);
+ uint32_t thunk_location = CompiledMethod::AlignCode(offset, instruction_set_);
thunk_locations_.push_back(thunk_location);
- offset = CompiledMethod::AlignCode(thunk_location + thunk_code_.size(), instruction_set_);
+ offset = thunk_location + thunk_code_.size();
}
}
for (const LinkerPatch& patch : compiled_method->GetPatches()) {
diff --git a/compiler/linker/arm/relative_patcher_thumb2_test.cc b/compiler/linker/arm/relative_patcher_thumb2_test.cc
index a8078e3..eace3d4 100644
--- a/compiler/linker/arm/relative_patcher_thumb2_test.cc
+++ b/compiler/linker/arm/relative_patcher_thumb2_test.cc
@@ -48,18 +48,18 @@
const ArrayRef<const LinkerPatch>& method3_patches,
uint32_t distance_without_thunks) {
CHECK_EQ(distance_without_thunks % kArmAlignment, 0u);
- const uint32_t method1_offset =
- CompiledCode::AlignCode(kTrampolineSize, kThumb2) + sizeof(OatQuickMethodHeader);
+ uint32_t method1_offset =
+ kTrampolineSize + CodeAlignmentSize(kTrampolineSize) + sizeof(OatQuickMethodHeader);
AddCompiledMethod(MethodRef(1u), method1_code, method1_patches);
// We want to put the method3 at a very precise offset.
const uint32_t method3_offset = method1_offset + distance_without_thunks;
- CHECK_ALIGNED(method3_offset - sizeof(OatQuickMethodHeader), kArmAlignment);
+ CHECK_ALIGNED(method3_offset, kArmAlignment);
// Calculate size of method2 so that we put method3 at the correct place.
+ const uint32_t method1_end = method1_offset + method1_code.size();
const uint32_t method2_offset =
- CompiledCode::AlignCode(method1_offset + method1_code.size(), kThumb2) +
- sizeof(OatQuickMethodHeader);
+ method1_end + CodeAlignmentSize(method1_end) + sizeof(OatQuickMethodHeader);
const uint32_t method2_size = (method3_offset - sizeof(OatQuickMethodHeader) - method2_offset);
std::vector<uint8_t> method2_raw_code(method2_size);
ArrayRef<const uint8_t> method2_code(method2_raw_code);
@@ -78,8 +78,11 @@
if (result3.second == method3_offset + 1 /* thumb mode */) {
return false; // No thunk.
} else {
- uint32_t aligned_thunk_size = CompiledCode::AlignCode(ThunkSize(), kThumb2);
- CHECK_EQ(result3.second, method3_offset + aligned_thunk_size + 1 /* thumb mode */);
+ uint32_t thunk_end =
+ CompiledCode::AlignCode(method3_offset - sizeof(OatQuickMethodHeader), kThumb2) +
+ ThunkSize();
+ uint32_t header_offset = thunk_end + CodeAlignmentSize(thunk_end);
+ CHECK_EQ(result3.second, header_offset + sizeof(OatQuickMethodHeader) + 1 /* thumb mode */);
return true; // Thunk present.
}
}
@@ -352,9 +355,12 @@
uint32_t method1_offset = GetMethodOffset(1u);
uint32_t method3_offset = GetMethodOffset(3u);
+ ASSERT_TRUE(IsAligned<kArmAlignment>(method3_offset));
uint32_t method3_header_offset = method3_offset - sizeof(OatQuickMethodHeader);
- ASSERT_TRUE(IsAligned<kArmAlignment>(method3_header_offset));
- uint32_t thunk_offset = method3_header_offset - CompiledCode::AlignCode(ThunkSize(), kThumb2);
+ uint32_t thunk_offset =
+ RoundDown(method3_header_offset - ThunkSize(), GetInstructionSetAlignment(kThumb2));
+ DCHECK_EQ(thunk_offset + ThunkSize() + CodeAlignmentSize(thunk_offset + ThunkSize()),
+ method3_header_offset);
ASSERT_TRUE(IsAligned<kArmAlignment>(thunk_offset));
uint32_t diff = thunk_offset - (method1_offset + bl_offset_in_method1 + 4u /* PC adjustment */);
ASSERT_EQ(diff & 1u, 0u);
diff --git a/compiler/linker/arm64/relative_patcher_arm64.cc b/compiler/linker/arm64/relative_patcher_arm64.cc
index fdd14be..4c8788e 100644
--- a/compiler/linker/arm64/relative_patcher_arm64.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64.cc
@@ -83,7 +83,7 @@
// Now that we have the actual offset where the code will be placed, locate the ADRP insns
// that actually require the thunk.
- uint32_t quick_code_offset = compiled_method->AlignCode(offset) + sizeof(OatQuickMethodHeader);
+ uint32_t quick_code_offset = compiled_method->AlignCode(offset + sizeof(OatQuickMethodHeader));
ArrayRef<const uint8_t> code = compiled_method->GetQuickCode();
uint32_t thunk_offset = compiled_method->AlignCode(quick_code_offset + code.size());
DCHECK(compiled_method != nullptr);
diff --git a/compiler/linker/arm64/relative_patcher_arm64_test.cc b/compiler/linker/arm64/relative_patcher_arm64_test.cc
index 09729fd..573de73 100644
--- a/compiler/linker/arm64/relative_patcher_arm64_test.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64_test.cc
@@ -67,36 +67,39 @@
const ArrayRef<const LinkerPatch>& last_method_patches,
uint32_t distance_without_thunks) {
CHECK_EQ(distance_without_thunks % kArm64Alignment, 0u);
- const uint32_t method1_offset =
- CompiledCode::AlignCode(kTrampolineSize, kArm64) + sizeof(OatQuickMethodHeader);
+ uint32_t method1_offset =
+ kTrampolineSize + CodeAlignmentSize(kTrampolineSize) + sizeof(OatQuickMethodHeader);
AddCompiledMethod(MethodRef(1u), method1_code, method1_patches);
- const uint32_t gap_start =
- CompiledCode::AlignCode(method1_offset + method1_code.size(), kArm64);
+ const uint32_t gap_start = method1_offset + method1_code.size();
// We want to put the method3 at a very precise offset.
const uint32_t last_method_offset = method1_offset + distance_without_thunks;
+ CHECK_ALIGNED(last_method_offset, kArm64Alignment);
const uint32_t gap_end = last_method_offset - sizeof(OatQuickMethodHeader);
- CHECK_ALIGNED(gap_end, kArm64Alignment);
- // Fill the gap with intermediate methods in chunks of 2MiB and the last in [2MiB, 4MiB).
+ // Fill the gap with intermediate methods in chunks of 2MiB and the first in [2MiB, 4MiB).
// (This allows deduplicating the small chunks to avoid using 256MiB of memory for +-128MiB
- // offsets by this test.)
+ // offsets by this test. Making the first chunk bigger makes it easy to give all intermediate
+ // methods the same end alignment, so the thunk insertion adds a predictable size as
+ // long as it's after the first chunk.)
uint32_t method_idx = 2u;
constexpr uint32_t kSmallChunkSize = 2 * MB;
std::vector<uint8_t> gap_code;
- size_t gap_size = gap_end - gap_start;
- for (; gap_size >= 2u * kSmallChunkSize; gap_size -= kSmallChunkSize) {
- uint32_t chunk_code_size = kSmallChunkSize - sizeof(OatQuickMethodHeader);
+ uint32_t gap_size = gap_end - gap_start;
+ uint32_t num_small_chunks = std::max(gap_size / kSmallChunkSize, 1u) - 1u;
+ uint32_t chunk_start = gap_start;
+ uint32_t chunk_size = gap_size - num_small_chunks * kSmallChunkSize;
+ for (uint32_t i = 0; i <= num_small_chunks; ++i) { // num_small_chunks+1 iterations.
+ uint32_t chunk_code_size =
+ chunk_size - CodeAlignmentSize(chunk_start) - sizeof(OatQuickMethodHeader);
gap_code.resize(chunk_code_size, 0u);
AddCompiledMethod(MethodRef(method_idx), ArrayRef<const uint8_t>(gap_code),
ArrayRef<const LinkerPatch>());
method_idx += 1u;
+ chunk_start += chunk_size;
+ chunk_size = kSmallChunkSize; // For all but the first chunk.
+ DCHECK_EQ(CodeAlignmentSize(gap_end), CodeAlignmentSize(chunk_start));
}
- uint32_t chunk_code_size = gap_size - sizeof(OatQuickMethodHeader);
- gap_code.resize(chunk_code_size, 0u);
- AddCompiledMethod(MethodRef(method_idx), ArrayRef<const uint8_t>(gap_code),
- ArrayRef<const LinkerPatch>());
- method_idx += 1u;
// Add the last method and link
AddCompiledMethod(MethodRef(method_idx), last_method_code, last_method_patches);
@@ -109,8 +112,9 @@
// There may be a thunk before method2.
if (last_result.second != last_method_offset) {
// Thunk present. Check that there's only one.
- uint32_t aligned_thunk_size = CompiledCode::AlignCode(ThunkSize(), kArm64);
- CHECK_EQ(last_result.second, last_method_offset + aligned_thunk_size);
+ uint32_t thunk_end = CompiledCode::AlignCode(gap_end, kArm64) + ThunkSize();
+ uint32_t header_offset = thunk_end + CodeAlignmentSize(thunk_end);
+ CHECK_EQ(last_result.second, header_offset + sizeof(OatQuickMethodHeader));
}
return method_idx;
}
@@ -341,7 +345,7 @@
uint32_t dex_cache_arrays_begin,
uint32_t element_offset) {
uint32_t method1_offset =
- CompiledCode::AlignCode(kTrampolineSize, kArm64) + sizeof(OatQuickMethodHeader);
+ kTrampolineSize + CodeAlignmentSize(kTrampolineSize) + sizeof(OatQuickMethodHeader);
ASSERT_LT(method1_offset, adrp_offset);
CHECK_ALIGNED(adrp_offset, 4u);
uint32_t num_nops = (adrp_offset - method1_offset) / 4u;
@@ -391,7 +395,7 @@
bool has_thunk,
uint32_t string_offset) {
uint32_t method1_offset =
- CompiledCode::AlignCode(kTrampolineSize, kArm64) + sizeof(OatQuickMethodHeader);
+ kTrampolineSize + CodeAlignmentSize(kTrampolineSize) + sizeof(OatQuickMethodHeader);
ASSERT_LT(method1_offset, adrp_offset);
CHECK_ALIGNED(adrp_offset, 4u);
uint32_t num_nops = (adrp_offset - method1_offset) / 4u;
@@ -614,10 +618,12 @@
uint32_t method1_offset = GetMethodOffset(1u);
uint32_t last_method_offset = GetMethodOffset(last_method_idx);
+ ASSERT_TRUE(IsAligned<kArm64Alignment>(last_method_offset));
uint32_t last_method_header_offset = last_method_offset - sizeof(OatQuickMethodHeader);
- ASSERT_TRUE(IsAligned<kArm64Alignment>(last_method_header_offset));
- uint32_t thunk_offset = last_method_header_offset - CompiledCode::AlignCode(ThunkSize(), kArm64);
- ASSERT_TRUE(IsAligned<kArm64Alignment>(thunk_offset));
+ uint32_t thunk_offset =
+ RoundDown(last_method_header_offset - ThunkSize(), GetInstructionSetAlignment(kArm64));
+ DCHECK_EQ(thunk_offset + ThunkSize() + CodeAlignmentSize(thunk_offset + ThunkSize()),
+ last_method_header_offset);
uint32_t diff = thunk_offset - (method1_offset + bl_offset_in_method1);
CHECK_ALIGNED(diff, 4u);
ASSERT_LT(diff, 128 * MB);
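
Both updated patcher tests now derive the expected thunk position backwards from the method header: round header_offset - ThunkSize() down to the instruction-set alignment, then verify that the padding after the thunk lands the header back on the expected offset. A worked example with invented sizes, assuming the method header size is a multiple of the alignment so that aligning the code and aligning the header coincide:

    #include <cstdint>
    #include <cstdio>

    static uint32_t RoundDown(uint32_t v, uint32_t a) { return v & ~(a - 1u); }
    static uint32_t RoundUp(uint32_t v, uint32_t a) { return (v + a - 1u) & ~(a - 1u); }

    int main() {
      // Invented values: 16-byte alignment, a 10-byte thunk, header at 4096.
      const uint32_t kAlign = 16u, kThunkSize = 10u, header_offset = 4096u;
      uint32_t thunk_offset = RoundDown(header_offset - kThunkSize, kAlign);  // 4080
      uint32_t thunk_end = thunk_offset + kThunkSize;                         // 4090
      uint32_t padding = RoundUp(thunk_end, kAlign) - thunk_end;              // 6
      std::printf("thunk at %u, padded end %u == header %u\n",
                  thunk_offset, thunk_end + padding, header_offset);
      return 0;
    }
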
diff --git a/compiler/linker/relative_patcher_test.h b/compiler/linker/relative_patcher_test.h
index ec69107..d21f33e 100644
--- a/compiler/linker/relative_patcher_test.h
+++ b/compiler/linker/relative_patcher_test.h
@@ -98,6 +98,14 @@
patches));
}
+ uint32_t CodeAlignmentSize(uint32_t header_offset_to_align) {
+ // We want to align the code rather than the preheader.
+ uint32_t unaligned_code_offset = header_offset_to_align + sizeof(OatQuickMethodHeader);
+ uint32_t aligned_code_offset =
+ CompiledMethod::AlignCode(unaligned_code_offset, instruction_set_);
+ return aligned_code_offset - unaligned_code_offset;
+ }
+
void Link() {
// Reserve space.
static_assert(kTrampolineOffset == 0u, "Unexpected trampoline offset.");
@@ -106,9 +114,8 @@
for (auto& compiled_method : compiled_methods_) {
offset = patcher_->ReserveSpace(offset, compiled_method.get(), compiled_method_refs_[idx]);
- uint32_t aligned_offset = compiled_method->AlignCode(offset);
- uint32_t aligned_code_delta = aligned_offset - offset;
- offset += aligned_code_delta;
+ uint32_t alignment_size = CodeAlignmentSize(offset);
+ offset += alignment_size;
offset += sizeof(OatQuickMethodHeader);
uint32_t quick_code_offset = offset + compiled_method->CodeDelta();
@@ -136,11 +143,10 @@
for (auto& compiled_method : compiled_methods_) {
offset = patcher_->WriteThunks(&out_, offset);
- uint32_t aligned_offset = compiled_method->AlignCode(offset);
- uint32_t aligned_code_delta = aligned_offset - offset;
- CHECK_LE(aligned_code_delta, sizeof(kPadding));
- out_.WriteFully(kPadding, aligned_code_delta);
- offset += aligned_code_delta;
+ uint32_t alignment_size = CodeAlignmentSize(offset);
+ CHECK_LE(alignment_size, sizeof(kPadding));
+ out_.WriteFully(kPadding, alignment_size);
+ offset += alignment_size;
out_.WriteFully(dummy_header, sizeof(OatQuickMethodHeader));
offset += sizeof(OatQuickMethodHeader);
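
The CodeAlignmentSize helper changes what gets aligned: padding is computed so that header_offset + sizeof(OatQuickMethodHeader), the first code byte, lands on the boundary rather than the header itself. A worked example with assumed sizes (the real header size and alignment are defined elsewhere in the tree):

    #include <cstdint>
    #include <cstdio>

    static uint32_t RoundUp(uint32_t v, uint32_t a) { return (v + a - 1u) & ~(a - 1u); }

    int main() {
      // Assumed sizes for illustration: 16-byte code alignment, 24-byte header.
      const uint32_t kAlign = 16u, kHeaderSize = 24u;
      uint32_t header_offset = 100u;                          // current write position
      uint32_t unaligned_code = header_offset + kHeaderSize;  // 124
      uint32_t padding = RoundUp(unaligned_code, kAlign) - unaligned_code;  // 4
      // Old scheme: pad the header to 112, leaving the code at 136 (unaligned).
      // New scheme: header at 104, code at 128, a 16-byte boundary.
      std::printf("padding=%u header=%u code=%u\n",
                  padding, header_offset + padding, unaligned_code + padding);
      return 0;
    }
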
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index f20c715..8273b15 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -87,6 +87,13 @@
OatHeader* const oat_header_;
};
+inline uint32_t CodeAlignmentSize(uint32_t header_offset, const CompiledMethod& compiled_method) {
+ // We want to align the code rather than the preheader.
+ uint32_t unaligned_code_offset = header_offset + sizeof(OatQuickMethodHeader);
+ uint32_t aligned_code_offset = compiled_method.AlignCode(unaligned_code_offset);
+ return aligned_code_offset - unaligned_code_offset;
+}
+
} // anonymous namespace
// Defines the location of the raw dex file to write.
@@ -817,8 +824,8 @@
uint32_t thumb_offset) {
offset_ = writer_->relative_patcher_->ReserveSpace(
offset_, compiled_method, MethodReference(dex_file_, it.GetMemberIndex()));
- offset_ = compiled_method->AlignCode(offset_);
- DCHECK_ALIGNED_PARAM(offset_,
+ offset_ += CodeAlignmentSize(offset_, *compiled_method);
+ DCHECK_ALIGNED_PARAM(offset_ + sizeof(OatQuickMethodHeader),
GetInstructionSetAlignment(compiled_method->GetInstructionSet()));
return offset_ + sizeof(OatQuickMethodHeader) + thumb_offset;
}
@@ -1011,17 +1018,16 @@
ReportWriteFailure("relative call thunk", it);
return false;
}
- uint32_t aligned_offset = compiled_method->AlignCode(offset_);
- uint32_t aligned_code_delta = aligned_offset - offset_;
- if (aligned_code_delta != 0) {
- if (!writer_->WriteCodeAlignment(out, aligned_code_delta)) {
+ uint32_t alignment_size = CodeAlignmentSize(offset_, *compiled_method);
+ if (alignment_size != 0) {
+ if (!writer_->WriteCodeAlignment(out, alignment_size)) {
ReportWriteFailure("code alignment padding", it);
return false;
}
- offset_ += aligned_code_delta;
+ offset_ += alignment_size;
DCHECK_OFFSET_();
}
- DCHECK_ALIGNED_PARAM(offset_,
+ DCHECK_ALIGNED_PARAM(offset_ + sizeof(OatQuickMethodHeader),
GetInstructionSetAlignment(compiled_method->GetInstructionSet()));
DCHECK_EQ(method_offsets.code_offset_,
offset_ + sizeof(OatQuickMethodHeader) + compiled_method->CodeDelta())
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 4a4b98c..5152075 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -765,16 +765,24 @@
LocationSummary* locations = instruction->GetLocations();
uint32_t register_mask = locations->GetRegisterMask();
- if (locations->OnlyCallsOnSlowPath()) {
- // In case of slow path, we currently set the location of caller-save registers
- // to register (instead of their stack location when pushed before the slow-path
- // call). Therefore register_mask contains both callee-save and caller-save
- // registers that hold objects. We must remove the caller-save from the mask, since
- // they will be overwritten by the callee.
- register_mask &= core_callee_save_mask_;
+ if (instruction->IsSuspendCheck()) {
+ // Suspend check has a special ABI that saves the caller-save registers in the callee,
+ // so we want to emit stack maps containing the registers.
+ // TODO: Register allocator still reserves space for the caller-save registers.
+ // We should add slow-path-specific caller-save information into LocationSummary
+ // and refactor the code here as well as in the register allocator to use it.
+ } else {
+ if (locations->OnlyCallsOnSlowPath()) {
+ // In case of slow path, we currently set the location of caller-save registers
+ // to register (instead of their stack location when pushed before the slow-path
+ // call). Therefore register_mask contains both callee-save and caller-save
+ // registers that hold objects. We must remove the caller-save from the mask, since
+ // they will be overwritten by the callee.
+ register_mask &= core_callee_save_mask_;
+ }
+ // The register mask must be a subset of callee-save registers.
+ DCHECK_EQ(register_mask & core_callee_save_mask_, register_mask);
}
- // The register mask must be a subset of callee-save registers.
- DCHECK_EQ(register_mask & core_callee_save_mask_, register_mask);
stack_map_stream_.BeginStackMapEntry(outer_dex_pc,
native_pc,
register_mask,
@@ -1174,7 +1182,7 @@
<< "instruction->DebugName()=" << instruction->DebugName()
<< " instruction->GetSideEffects().ToString()=" << instruction->GetSideEffects().ToString();
} else {
- DCHECK(instruction->GetLocations()->OnlyCallsOnSlowPath() || slow_path->IsFatal())
+ DCHECK(instruction->GetLocations()->CallsOnSlowPath() || slow_path->IsFatal())
<< "instruction->DebugName()=" << instruction->DebugName()
<< " slow_path->GetDescription()=" << slow_path->GetDescription();
DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()) ||
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index ad02ecf..fd396c4 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -340,6 +340,9 @@
bool* GetBlockedCoreRegisters() const { return blocked_core_registers_; }
bool* GetBlockedFloatingPointRegisters() const { return blocked_fpu_registers_; }
+ bool IsBlockedCoreRegister(size_t i) { return blocked_core_registers_[i]; }
+ bool IsBlockedFloatingPointRegister(size_t i) { return blocked_fpu_registers_[i]; }
+
// Helper that returns the pointer offset of an index in an object array.
// Note: this method assumes we always have the same pointer size, regardless
// of the architecture.
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index cd7a90e..5eaf11e 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -119,11 +119,9 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
- SaveLiveRegisters(codegen, instruction_->GetLocations());
arm_codegen->InvokeRuntime(
QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickTestSuspend, void, void>();
- RestoreLiveRegisters(codegen, instruction_->GetLocations());
if (successor_ == nullptr) {
__ b(GetReturnLabel());
} else {
@@ -434,6 +432,11 @@
(instruction_->IsInvokeVirtual()) && instruction_->GetLocations()->Intrinsified())
<< "Unexpected instruction in read barrier marking slow path: "
<< instruction_->DebugName();
+ // The read barrier instrumentation of object ArrayGet
+ // instructions does not support the HIntermediateAddress
+ // instruction.
+ DCHECK(!(instruction_->IsArrayGet() &&
+ instruction_->AsArrayGet()->GetArray()->IsIntermediateAddress()));
__ Bind(GetEntryLabel());
// No need to save live registers; it's taken care of by the
@@ -514,6 +517,11 @@
(instruction_->IsInvokeVirtual()) && instruction_->GetLocations()->Intrinsified())
<< "Unexpected instruction in read barrier for heap reference slow path: "
<< instruction_->DebugName();
+ // The read barrier instrumentation of object ArrayGet
+ // instructions does not support the HIntermediateAddress
+ // instruction.
+ DCHECK(!(instruction_->IsArrayGet() &&
+ instruction_->AsArrayGet()->GetArray()->IsIntermediateAddress()));
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
@@ -4469,8 +4477,6 @@
Primitive::Type type = instruction->GetType();
HInstruction* array_instr = instruction->GetArray();
bool has_intermediate_address = array_instr->IsIntermediateAddress();
- // The read barrier instrumentation does not support the HIntermediateAddress instruction yet.
- DCHECK(!(has_intermediate_address && kEmitCompilerReadBarrier));
switch (type) {
case Primitive::kPrimBoolean:
@@ -4505,6 +4511,11 @@
}
case Primitive::kPrimNot: {
+ // The read barrier instrumentation of object ArrayGet
+ // instructions does not support the HIntermediateAddress
+ // instruction.
+ DCHECK(!(has_intermediate_address && kEmitCompilerReadBarrier));
+
static_assert(
sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
"art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
@@ -4647,8 +4658,6 @@
Location value_loc = locations->InAt(2);
HInstruction* array_instr = instruction->GetArray();
bool has_intermediate_address = array_instr->IsIntermediateAddress();
- // The read barrier instrumentation does not support the HIntermediateAddress instruction yet.
- DCHECK(!(has_intermediate_address && kEmitCompilerReadBarrier));
switch (value_type) {
case Primitive::kPrimBoolean:
@@ -4913,8 +4922,6 @@
}
void LocationsBuilderARM::VisitIntermediateAddress(HIntermediateAddress* instruction) {
- // The read barrier instrumentation does not support the HIntermediateAddress instruction yet.
- DCHECK(!kEmitCompilerReadBarrier);
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
@@ -4929,9 +4936,6 @@
Location first = locations->InAt(0);
Location second = locations->InAt(1);
- // The read barrier instrumentation does not support the HIntermediateAddress instruction yet.
- DCHECK(!kEmitCompilerReadBarrier);
-
if (second.IsRegister()) {
__ add(out.AsRegister<Register>(),
first.AsRegister<Register>(),
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 115cee6..9ceb310 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -398,11 +398,9 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
__ Bind(GetEntryLabel());
- SaveLiveRegisters(codegen, instruction_->GetLocations());
arm64_codegen->InvokeRuntime(
QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickTestSuspend, void, void>();
- RestoreLiveRegisters(codegen, instruction_->GetLocations());
if (successor_ == nullptr) {
__ B(GetReturnLabel());
} else {
@@ -600,6 +598,11 @@
(instruction_->IsInvokeVirtual()) && instruction_->GetLocations()->Intrinsified())
<< "Unexpected instruction in read barrier marking slow path: "
<< instruction_->DebugName();
+ // The read barrier instrumentation of object ArrayGet
+ // instructions does not support the HIntermediateAddress
+ // instruction.
+ DCHECK(!(instruction_->IsArrayGet() &&
+ instruction_->AsArrayGet()->GetArray()->IsIntermediateAddress()));
__ Bind(GetEntryLabel());
// No need to save live registers; it's taken care of by the
@@ -609,6 +612,8 @@
DCHECK_NE(obj_.reg(), LR);
DCHECK_NE(obj_.reg(), WSP);
DCHECK_NE(obj_.reg(), WZR);
+ // WIP0 is used by the slow path as a temp; it cannot be the object register.
+ DCHECK_NE(obj_.reg(), IP0);
DCHECK(0 <= obj_.reg() && obj_.reg() < kNumberOfWRegisters) << obj_.reg();
// "Compact" slow path, saving two moves.
//
@@ -680,7 +685,9 @@
(instruction_->IsInvokeVirtual()) && instruction_->GetLocations()->Intrinsified())
<< "Unexpected instruction in read barrier for heap reference slow path: "
<< instruction_->DebugName();
- // The read barrier instrumentation does not support the HIntermediateAddress instruction yet.
+ // The read barrier instrumentation of object ArrayGet
+ // instructions does not support the HIntermediateAddress
+ // instruction.
DCHECK(!(instruction_->IsArrayGet() &&
instruction_->AsArrayGet()->GetArray()->IsIntermediateAddress()));
@@ -751,10 +758,7 @@
(instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile))
<< instruction_->AsInvoke()->GetIntrinsic();
DCHECK_EQ(offset_, 0U);
- DCHECK(index_.IsRegisterPair());
- // UnsafeGet's offset location is a register pair, the low
- // part contains the correct offset.
- index = index_.ToLow();
+ DCHECK(index_.IsRegister());
}
}
@@ -1284,17 +1288,21 @@
UseScratchRegisterScope temps(GetVIXLAssembler());
HConstant* src_cst = source.GetConstant();
CPURegister temp;
- if (src_cst->IsIntConstant() || src_cst->IsNullConstant()) {
- temp = temps.AcquireW();
- } else if (src_cst->IsLongConstant()) {
- temp = temps.AcquireX();
- } else if (src_cst->IsFloatConstant()) {
- temp = temps.AcquireS();
+ if (src_cst->IsZeroBitPattern()) {
+ temp = (src_cst->IsLongConstant() || src_cst->IsDoubleConstant()) ? xzr : wzr;
} else {
- DCHECK(src_cst->IsDoubleConstant());
- temp = temps.AcquireD();
+ if (src_cst->IsIntConstant()) {
+ temp = temps.AcquireW();
+ } else if (src_cst->IsLongConstant()) {
+ temp = temps.AcquireX();
+ } else if (src_cst->IsFloatConstant()) {
+ temp = temps.AcquireS();
+ } else {
+ DCHECK(src_cst->IsDoubleConstant());
+ temp = temps.AcquireD();
+ }
+ MoveConstant(temp, src_cst);
}
- MoveConstant(temp, src_cst);
__ Str(temp, StackOperandFrom(destination));
} else {
DCHECK(source.IsStackSlot() || source.IsDoubleStackSlot());
@@ -1982,8 +1990,6 @@
}
void LocationsBuilderARM64::VisitIntermediateAddress(HIntermediateAddress* instruction) {
- // The read barrier instrumentation does not support the HIntermediateAddress instruction yet.
- DCHECK(!kEmitCompilerReadBarrier);
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
@@ -1991,10 +1997,7 @@
locations->SetOut(Location::RequiresRegister());
}
-void InstructionCodeGeneratorARM64::VisitIntermediateAddress(
- HIntermediateAddress* instruction) {
- // The read barrier instrumentation does not support the HIntermediateAddress instruction yet.
- DCHECK(!kEmitCompilerReadBarrier);
+void InstructionCodeGeneratorARM64::VisitIntermediateAddress(HIntermediateAddress* instruction) {
__ Add(OutputRegister(instruction),
InputRegisterAt(instruction, 0),
Operand(InputOperandAt(instruction, 1)));
@@ -2090,11 +2093,15 @@
// Block pools between `Load` and `MaybeRecordImplicitNullCheck`.
BlockPoolsScope block_pools(masm);
+ // The read barrier instrumentation of object ArrayGet instructions
+ // does not support the HIntermediateAddress instruction.
+ DCHECK(!((type == Primitive::kPrimNot) &&
+ instruction->GetArray()->IsIntermediateAddress() &&
+ kEmitCompilerReadBarrier));
+
if (type == Primitive::kPrimNot && kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
// Object ArrayGet with Baker's read barrier case.
Register temp = temps.AcquireW();
- // The read barrier instrumentation does not support the HIntermediateAddress instruction yet.
- DCHECK(!instruction->GetArray()->IsIntermediateAddress());
// Note that a potential implicit null check is handled in the
// CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier call.
codegen_->GenerateArrayLoadWithBakerReadBarrier(
@@ -2108,9 +2115,6 @@
} else {
Register temp = temps.AcquireSameSizeAs(obj);
if (instruction->GetArray()->IsIntermediateAddress()) {
- // The read barrier instrumentation does not support the
- // HIntermediateAddress instruction yet.
- DCHECK(!kEmitCompilerReadBarrier);
// We do not need to compute the intermediate address from the array: the
// input instruction has done it already. See the comment in
// `TryExtractArrayAccessAddress()`.
@@ -2200,9 +2204,6 @@
UseScratchRegisterScope temps(masm);
Register temp = temps.AcquireSameSizeAs(array);
if (instruction->GetArray()->IsIntermediateAddress()) {
- // The read barrier instrumentation does not support the
- // HIntermediateAddress instruction yet.
- DCHECK(!kEmitCompilerReadBarrier);
// We do not need to compute the intermediate address from the array: the
// input instruction has done it already. See the comment in
// `TryExtractArrayAccessAddress()`.
@@ -2222,7 +2223,6 @@
codegen_->Store(value_type, value, destination);
codegen_->MaybeRecordImplicitNullCheck(instruction);
} else {
- DCHECK(needs_write_barrier);
DCHECK(!instruction->GetArray()->IsIntermediateAddress());
vixl::aarch64::Label done;
SlowPathCodeARM64* slow_path = nullptr;
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 240936c..1b5fa85 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -243,7 +243,7 @@
}
Arm64Assembler* GetAssembler() const { return assembler_; }
- vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->vixl_masm_; }
+ vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->GetVIXLAssembler(); }
private:
void GenerateClassInitializationCheck(SlowPathCodeARM64* slow_path,
@@ -364,7 +364,7 @@
private:
Arm64Assembler* GetAssembler() const;
vixl::aarch64::MacroAssembler* GetVIXLAssembler() const {
- return GetAssembler()->vixl_masm_;
+ return GetAssembler()->GetVIXLAssembler();
}
CodeGeneratorARM64* const codegen_;
@@ -413,7 +413,7 @@
HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
Arm64Assembler* GetAssembler() OVERRIDE { return &assembler_; }
const Arm64Assembler& GetAssembler() const OVERRIDE { return assembler_; }
- vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->vixl_masm_; }
+ vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->GetVIXLAssembler(); }
// Emit a write barrier.
void MarkGCCard(vixl::aarch64::Register object,
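
Note: the three hunks above replace direct reads of the public vixl_masm_ field with an
accessor call. A minimal sketch of the accessor shape this assumes on Arm64Assembler
(the actual declaration lives in utils/arm64/assembler_arm64.h, outside this excerpt,
so treat the exact form as an assumption):

    class Arm64Assembler FINAL : public Assembler {
     public:
      // Encapsulates the VIXL macro assembler instead of exposing the raw field.
      vixl::aarch64::MacroAssembler* GetVIXLAssembler() const { return vixl_masm_; }
     private:
      vixl::aarch64::MacroAssembler* vixl_masm_;
    };
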
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 8dd82ef..59e103a 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -351,14 +351,12 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
__ Bind(GetEntryLabel());
- SaveLiveRegisters(codegen, instruction_->GetLocations());
mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pTestSuspend),
instruction_,
instruction_->GetDexPc(),
this,
IsDirectEntrypoint(kQuickTestSuspend));
CheckEntrypointTypes<kQuickTestSuspend, void, void>();
- RestoreLiveRegisters(codegen, instruction_->GetLocations());
if (successor_ == nullptr) {
__ B(GetReturnLabel());
} else {
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 3472830..fe1fddc 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -300,13 +300,11 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
__ Bind(GetEntryLabel());
- SaveLiveRegisters(codegen, instruction_->GetLocations());
mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pTestSuspend),
instruction_,
instruction_->GetDexPc(),
this);
CheckEntrypointTypes<kQuickTestSuspend, void, void>();
- RestoreLiveRegisters(codegen, instruction_->GetLocations());
if (successor_ == nullptr) {
__ Bc(GetReturnLabel());
} else {
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index a2fa245..ade2117 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -192,13 +192,11 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
- SaveLiveRegisters(codegen, instruction_->GetLocations());
x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pTestSuspend),
instruction_,
instruction_->GetDexPc(),
this);
CheckEntrypointTypes<kQuickTestSuspend, void, void>();
- RestoreLiveRegisters(codegen, instruction_->GetLocations());
if (successor_ == nullptr) {
__ jmp(GetReturnLabel());
} else {
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 5d5fa85..eadb431 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -149,13 +149,11 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
- SaveLiveRegisters(codegen, instruction_->GetLocations());
x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pTestSuspend),
instruction_,
instruction_->GetDexPc(),
this);
CheckEntrypointTypes<kQuickTestSuspend, void, void>();
- RestoreLiveRegisters(codegen, instruction_->GetLocations());
if (successor_ == nullptr) {
__ jmp(GetReturnLabel());
} else {
diff --git a/compiler/optimizing/dead_code_elimination.h b/compiler/optimizing/dead_code_elimination.h
index 0ce0ec1..58e700d 100644
--- a/compiler/optimizing/dead_code_elimination.h
+++ b/compiler/optimizing/dead_code_elimination.h
@@ -31,13 +31,11 @@
public:
HDeadCodeElimination(HGraph* graph,
OptimizingCompilerStats* stats = nullptr,
- const char* name = kInitialDeadCodeEliminationPassName)
+ const char* name = kDeadCodeEliminationPassName)
: HOptimization(graph, name, stats) {}
void Run() OVERRIDE;
-
- static constexpr const char* kInitialDeadCodeEliminationPassName = "dead_code_elimination";
- static constexpr const char* kFinalDeadCodeEliminationPassName = "dead_code_elimination_final";
+ static constexpr const char* kDeadCodeEliminationPassName = "dead_code_elimination";
private:
void MaybeRecordDeadBlock(HBasicBlock* block);
diff --git a/compiler/optimizing/dex_cache_array_fixups_arm.h b/compiler/optimizing/dex_cache_array_fixups_arm.h
index 015f910..9142e29 100644
--- a/compiler/optimizing/dex_cache_array_fixups_arm.h
+++ b/compiler/optimizing/dex_cache_array_fixups_arm.h
@@ -26,7 +26,9 @@
class DexCacheArrayFixups : public HOptimization {
public:
DexCacheArrayFixups(HGraph* graph, OptimizingCompilerStats* stats)
- : HOptimization(graph, "dex_cache_array_fixups_arm", stats) {}
+ : HOptimization(graph, kDexCacheArrayFixupsArmPassName, stats) {}
+
+ static constexpr const char* kDexCacheArrayFixupsArmPassName = "dex_cache_array_fixups_arm";
void Run() OVERRIDE;
};
diff --git a/compiler/optimizing/dex_cache_array_fixups_mips.h b/compiler/optimizing/dex_cache_array_fixups_mips.h
index 21056e1..861a199 100644
--- a/compiler/optimizing/dex_cache_array_fixups_mips.h
+++ b/compiler/optimizing/dex_cache_array_fixups_mips.h
@@ -29,9 +29,11 @@
class DexCacheArrayFixups : public HOptimization {
public:
DexCacheArrayFixups(HGraph* graph, CodeGenerator* codegen, OptimizingCompilerStats* stats)
- : HOptimization(graph, "dex_cache_array_fixups_mips", stats),
+ : HOptimization(graph, kDexCacheArrayFixupsMipsPassName, stats),
codegen_(codegen) {}
+ static constexpr const char* kDexCacheArrayFixupsMipsPassName = "dex_cache_array_fixups_mips";
+
void Run() OVERRIDE;
private:
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 0b4c569..89d80cc 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -298,6 +298,12 @@
stream << constant->AsIntConstant()->GetValue();
} else if (constant->IsLongConstant()) {
stream << constant->AsLongConstant()->GetValue();
+ } else if (constant->IsFloatConstant()) {
+ stream << constant->AsFloatConstant()->GetValue();
+ } else if (constant->IsDoubleConstant()) {
+ stream << constant->AsDoubleConstant()->GetValue();
+ } else if (constant->IsNullConstant()) {
+ stream << "null";
}
} else if (location.IsInvalid()) {
stream << "invalid";
diff --git a/compiler/optimizing/induction_var_analysis.h b/compiler/optimizing/induction_var_analysis.h
index 7c74816..cd4c830 100644
--- a/compiler/optimizing/induction_var_analysis.h
+++ b/compiler/optimizing/induction_var_analysis.h
@@ -39,9 +39,9 @@
void Run() OVERRIDE;
- private:
static constexpr const char* kInductionPassName = "induction_var_analysis";
+ private:
struct NodeInfo {
explicit NodeInfo(uint32_t d) : depth(d), done(false) {}
uint32_t depth;
diff --git a/compiler/optimizing/instruction_simplifier_arm.h b/compiler/optimizing/instruction_simplifier_arm.h
index 3d297da..782110c 100644
--- a/compiler/optimizing/instruction_simplifier_arm.h
+++ b/compiler/optimizing/instruction_simplifier_arm.h
@@ -48,7 +48,9 @@
class InstructionSimplifierArm : public HOptimization {
public:
InstructionSimplifierArm(HGraph* graph, OptimizingCompilerStats* stats)
- : HOptimization(graph, "instruction_simplifier_arm", stats) {}
+ : HOptimization(graph, kInstructionSimplifierArmPassName, stats) {}
+
+ static constexpr const char* kInstructionSimplifierArmPassName = "instruction_simplifier_arm";
void Run() OVERRIDE {
InstructionSimplifierArmVisitor visitor(graph_, stats_);
diff --git a/compiler/optimizing/instruction_simplifier_arm64.h b/compiler/optimizing/instruction_simplifier_arm64.h
index 28648b3..f71684e 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.h
+++ b/compiler/optimizing/instruction_simplifier_arm64.h
@@ -82,8 +82,9 @@
class InstructionSimplifierArm64 : public HOptimization {
public:
InstructionSimplifierArm64(HGraph* graph, OptimizingCompilerStats* stats)
- : HOptimization(graph, "instruction_simplifier_arm64", stats) {}
-
+ : HOptimization(graph, kInstructionSimplifierArm64PassName, stats) {}
+ static constexpr const char* kInstructionSimplifierArm64PassName
+ = "instruction_simplifier_arm64";
void Run() OVERRIDE {
InstructionSimplifierArm64Visitor visitor(graph_, stats_);
visitor.VisitReversePostOrder();
diff --git a/compiler/optimizing/instruction_simplifier_shared.cc b/compiler/optimizing/instruction_simplifier_shared.cc
index 8f7778f..6632cd9 100644
--- a/compiler/optimizing/instruction_simplifier_shared.cc
+++ b/compiler/optimizing/instruction_simplifier_shared.cc
@@ -231,15 +231,6 @@
HInstruction* array,
HInstruction* index,
size_t data_offset) {
- if (kEmitCompilerReadBarrier) {
- // The read barrier instrumentation does not support the
- // HIntermediateAddress instruction yet.
- //
- // TODO: Handle this case properly in the ARM64 and ARM code generator and
- // re-enable this optimization; otherwise, remove this TODO.
- // b/26601270
- return false;
- }
if (index->IsConstant() ||
(index->IsBoundsCheck() && index->AsBoundsCheck()->GetIndex()->IsConstant())) {
// When the index is a constant all the addressing can be fitted in the
@@ -251,6 +242,13 @@
// The access may require a runtime call or the original array pointer.
return false;
}
+ if (kEmitCompilerReadBarrier &&
+ access->IsArrayGet() &&
+ access->AsArrayGet()->GetType() == Primitive::kPrimNot) {
+ // For object arrays, the read barrier instrumentation requires
+ // the original array pointer.
+ return false;
+ }
// Proceed to extract the base address computation.
HGraph* graph = access->GetBlock()->GetGraph();
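
The net effect of reordering these checks: with read barriers enabled, base-address
extraction is now rejected only for reference-typed ArrayGet instructions, while
primitive accesses may still be rewritten. Schematically (hypothetical IR, following
the transformation performed by TryExtractArrayAccessAddress):

    // Before:
    //   ArrayGet [array, index]                    (type: Int)
    // After:
    //   address = IntermediateAddress [array, data_offset]
    //   ArrayGet [address, index]                  (type: Int)
    //
    // A reference-typed ArrayGet (type: Not) is left untouched, because the read
    // barrier instrumentation needs the original array pointer.
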
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index be061f5..27d9d48 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -1212,7 +1212,7 @@
void IntrinsicLocationsBuilderARM::VisitStringIndexOf(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
// We have a hand-crafted assembly stub that follows the runtime calling convention. So it's
// best to align the inputs accordingly.
@@ -1232,7 +1232,7 @@
void IntrinsicLocationsBuilderARM::VisitStringIndexOfAfter(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
// We have a hand-crafted assembly stub that follows the runtime calling convention. So it's
// best to align the inputs accordingly.
@@ -1250,7 +1250,7 @@
void IntrinsicLocationsBuilderARM::VisitStringNewStringFromBytes(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -1311,7 +1311,7 @@
void IntrinsicLocationsBuilderARM::VisitStringNewStringFromString(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 06d1148..e7c40e6 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -26,7 +26,6 @@
#include "mirror/string.h"
#include "thread.h"
#include "utils/arm64/assembler_arm64.h"
-#include "utils/arm64/constants_arm64.h"
using namespace vixl::aarch64; // NOLINT(build/namespaces)
@@ -62,14 +61,14 @@
} // namespace
MacroAssembler* IntrinsicCodeGeneratorARM64::GetVIXLAssembler() {
- return codegen_->GetAssembler()->vixl_masm_;
+ return codegen_->GetVIXLAssembler();
}
ArenaAllocator* IntrinsicCodeGeneratorARM64::GetAllocator() {
return codegen_->GetGraph()->GetArena();
}
-#define __ codegen->GetAssembler()->vixl_masm_->
+#define __ codegen->GetVIXLAssembler()->
static void MoveFromReturnRegister(Location trg,
Primitive::Type type,
@@ -782,7 +781,7 @@
DCHECK((type == Primitive::kPrimInt) ||
(type == Primitive::kPrimLong) ||
(type == Primitive::kPrimNot));
- MacroAssembler* masm = codegen->GetAssembler()->vixl_masm_;
+ MacroAssembler* masm = codegen->GetVIXLAssembler();
Location base_loc = locations->InAt(1);
Register base = WRegisterFrom(base_loc); // Object pointer.
Location offset_loc = locations->InAt(2);
@@ -916,7 +915,7 @@
bool is_volatile,
bool is_ordered,
CodeGeneratorARM64* codegen) {
- MacroAssembler* masm = codegen->GetAssembler()->vixl_masm_;
+ MacroAssembler* masm = codegen->GetVIXLAssembler();
Register base = WRegisterFrom(locations->InAt(1)); // Object pointer.
Register offset = XRegisterFrom(locations->InAt(2)); // Long offset.
@@ -1035,7 +1034,7 @@
}
static void GenCas(LocationSummary* locations, Primitive::Type type, CodeGeneratorARM64* codegen) {
- MacroAssembler* masm = codegen->GetAssembler()->vixl_masm_;
+ MacroAssembler* masm = codegen->GetVIXLAssembler();
Register out = WRegisterFrom(locations->Out()); // Boolean result.
@@ -1409,7 +1408,7 @@
void IntrinsicLocationsBuilderARM64::VisitStringIndexOf(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
// We have a hand-crafted assembly stub that follows the runtime calling convention. So it's
// best to align the inputs accordingly.
@@ -1429,7 +1428,7 @@
void IntrinsicLocationsBuilderARM64::VisitStringIndexOfAfter(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
// We have a hand-crafted assembly stub that follows the runtime calling convention. So it's
// best to align the inputs accordingly.
@@ -1447,7 +1446,7 @@
void IntrinsicLocationsBuilderARM64::VisitStringNewStringFromBytes(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
@@ -1506,7 +1505,7 @@
void IntrinsicLocationsBuilderARM64::VisitStringNewStringFromString(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 9449f79..55e1ab2 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -2070,7 +2070,7 @@
// int java.lang.String.indexOf(int ch)
void IntrinsicLocationsBuilderMIPS::VisitStringIndexOf(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
// We have a hand-crafted assembly stub that follows the runtime
// calling convention. So it's best to align the inputs accordingly.
@@ -2095,7 +2095,7 @@
// int java.lang.String.indexOf(int ch, int fromIndex)
void IntrinsicLocationsBuilderMIPS::VisitStringIndexOfAfter(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
// We have a hand-crafted assembly stub that follows the runtime
// calling convention. So it's best to align the inputs accordingly.
@@ -2121,7 +2121,7 @@
// java.lang.StringFactory.newStringFromBytes(byte[] data, int high, int offset, int byteCount)
void IntrinsicLocationsBuilderMIPS::VisitStringNewStringFromBytes(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -2186,7 +2186,7 @@
// java.lang.StringFactory.newStringFromString(String toCopy)
void IntrinsicLocationsBuilderMIPS::VisitStringNewStringFromString(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 8d4d3e5..1e18540 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -1707,7 +1707,7 @@
// int java.lang.String.indexOf(int ch)
void IntrinsicLocationsBuilderMIPS64::VisitStringIndexOf(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
// We have a hand-crafted assembly stub that follows the runtime
// calling convention. So it's best to align the inputs accordingly.
@@ -1728,7 +1728,7 @@
// int java.lang.String.indexOf(int ch, int fromIndex)
void IntrinsicLocationsBuilderMIPS64::VisitStringIndexOfAfter(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
// We have a hand-crafted assembly stub that follows the runtime
// calling convention. So it's best to align the inputs accordingly.
@@ -1748,7 +1748,7 @@
// java.lang.StringFactory.newStringFromBytes(byte[] data, int high, int offset, int byteCount)
void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromBytes(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -1816,7 +1816,7 @@
// java.lang.StringFactory.newStringFromString(String toCopy)
void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromString(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 65f4def..dc409c9 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -752,8 +752,6 @@
GenSSE41FPToFPIntrinsic(codegen_, invoke, GetAssembler(), 0);
}
-// Note that 32 bit x86 doesn't have the capability to inline MathRoundDouble,
-// as it needs 64 bit instructions.
void IntrinsicLocationsBuilderX86::VisitMathRoundFloat(HInvoke* invoke) {
// See intrinsics.h.
if (!kRoundIsPlusPointFive) {
@@ -762,10 +760,17 @@
// Do we have instruction support?
if (codegen_->GetInstructionSetFeatures().HasSSE4_1()) {
+ HInvokeStaticOrDirect* static_or_direct = invoke->AsInvokeStaticOrDirect();
+ DCHECK(static_or_direct != nullptr);
LocationSummary* locations = new (arena_) LocationSummary(invoke,
LocationSummary::kNoCall,
kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
+ if (static_or_direct->HasSpecialInput() &&
+ invoke->InputAt(
+ static_or_direct->GetSpecialInputIndex())->IsX86ComputeBaseMethodAddress()) {
+ locations->SetInAt(1, Location::RequiresRegister());
+ }
locations->SetOut(Location::RequiresRegister());
locations->AddTemp(Location::RequiresFpuRegister());
locations->AddTemp(Location::RequiresFpuRegister());
@@ -774,7 +779,7 @@
// We have to fall back to a call to the intrinsic.
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly);
+ LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetFpuRegisterAt(0)));
locations->SetOut(Location::RegisterLocation(EAX));
@@ -784,47 +789,42 @@
void IntrinsicCodeGeneratorX86::VisitMathRoundFloat(HInvoke* invoke) {
LocationSummary* locations = invoke->GetLocations();
- if (locations->WillCall()) {
+ if (locations->WillCall()) { // TODO: can we reach this?
InvokeOutOfLineIntrinsic(codegen_, invoke);
return;
}
- // Implement RoundFloat as t1 = floor(input + 0.5f); convert to int.
XmmRegister in = locations->InAt(0).AsFpuRegister<XmmRegister>();
+ Register constant_area = locations->InAt(1).AsRegister<Register>();
+ XmmRegister t1 = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
+ XmmRegister t2 = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
Register out = locations->Out().AsRegister<Register>();
- XmmRegister maxInt = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
- XmmRegister inPlusPointFive = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
- NearLabel done, nan;
+ NearLabel skip_incr, done;
X86Assembler* assembler = GetAssembler();
- // Generate 0.5 into inPlusPointFive.
- __ movl(out, Immediate(bit_cast<int32_t, float>(0.5f)));
- __ movd(inPlusPointFive, out);
+ // Since no direct x86 rounding instruction matches the required semantics,
+ // this intrinsic is implemented as follows:
+ // result = floor(in);
+ // if (in - result >= 0.5f)
+ // result = result + 1.0f;
+ __ movss(t2, in);
+ __ roundss(t1, in, Immediate(1));
+ __ subss(t2, t1);
+ __ comiss(t2, codegen_->LiteralInt32Address(bit_cast<int32_t, float>(0.5f), constant_area));
+ __ j(kBelow, &skip_incr);
+ __ addss(t1, codegen_->LiteralInt32Address(bit_cast<int32_t, float>(1.0f), constant_area));
+ __ Bind(&skip_incr);
- // Add in the input.
- __ addss(inPlusPointFive, in);
-
- // And truncate to an integer.
- __ roundss(inPlusPointFive, inPlusPointFive, Immediate(1));
-
+ // Final conversion to an integer. Unfortunately this also does not have a
+ // direct x86 instruction, since NaN should map to 0 and large positive
+ // values need to be clipped to the extreme value.
__ movl(out, Immediate(kPrimIntMax));
- // maxInt = int-to-float(out)
- __ cvtsi2ss(maxInt, out);
-
- // if inPlusPointFive >= maxInt goto done
- __ comiss(inPlusPointFive, maxInt);
- __ j(kAboveEqual, &done);
-
- // if input == NaN goto nan
- __ j(kUnordered, &nan);
-
- // output = float-to-int-truncate(input)
- __ cvttss2si(out, inPlusPointFive);
- __ jmp(&done);
- __ Bind(&nan);
-
- // output = 0
- __ xorl(out, out);
+ __ cvtsi2ss(t2, out);
+ __ comiss(t1, t2);
+ __ j(kAboveEqual, &done); // clipped to max (already in out), does not jump on unordered
+ __ movl(out, Immediate(0)); // does not change flags
+ __ j(kUnordered, &done); // NaN mapped to 0 (just moved in out)
+ __ cvttss2si(out, t1);
__ Bind(&done);
}
@@ -1216,7 +1216,7 @@
void IntrinsicLocationsBuilderX86::VisitStringCompareTo(HInvoke* invoke) {
// The inputs plus one temp.
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -1490,7 +1490,7 @@
void IntrinsicLocationsBuilderX86::VisitStringNewStringFromBytes(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -1543,7 +1543,7 @@
void IntrinsicLocationsBuilderX86::VisitStringNewStringFromString(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
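
For reference, the new movss/roundss/comiss sequence implements the semantics below,
shown as a plain C++ sketch of the algorithm described in the comments above (the
function name is invented; this is not code from the commit):

    #include <cmath>
    #include <cstdint>
    #include <limits>

    int32_t RoundFloatReference(float in) {
      if (std::isnan(in)) {
        return 0;  // NaN maps to 0.
      }
      float result = std::floor(in);  // roundss with immediate 1 rounds toward -inf.
      if (in - result >= 0.5f) {
        result += 1.0f;  // Round halfway cases (and above) up.
      }
      // Clip to the extremes, mirroring the comiss/cvttss2si tail; very negative
      // inputs clamp through cvttss2si's 0x80000000 result in the generated code.
      if (result >= static_cast<float>(std::numeric_limits<int32_t>::max())) {
        return std::numeric_limits<int32_t>::max();
      }
      if (result <= static_cast<float>(std::numeric_limits<int32_t>::min())) {
        return std::numeric_limits<int32_t>::min();
      }
      return static_cast<int32_t>(result);
    }

The x86-64 float and double variants below follow the same shape, with the double
version clipping to kPrimLongMax.
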
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 7e0d729..7dfbfb0 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -583,6 +583,7 @@
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
locations->AddTemp(Location::RequiresFpuRegister());
+ locations->AddTemp(Location::RequiresFpuRegister());
return;
}
@@ -598,9 +599,10 @@
void IntrinsicLocationsBuilderX86_64::VisitMathRoundFloat(HInvoke* invoke) {
// See intrinsics.h.
- if (kRoundIsPlusPointFive) {
- CreateSSE41FPToIntLocations(arena_, invoke, codegen_);
+ if (!kRoundIsPlusPointFive) {
+ return;
}
+ CreateSSE41FPToIntLocations(arena_, invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitMathRoundFloat(HInvoke* invoke) {
@@ -610,47 +612,45 @@
return;
}
- // Implement RoundFloat as t1 = floor(input + 0.5f); convert to int.
XmmRegister in = locations->InAt(0).AsFpuRegister<XmmRegister>();
CpuRegister out = locations->Out().AsRegister<CpuRegister>();
- XmmRegister inPlusPointFive = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
- NearLabel done, nan;
+ XmmRegister t1 = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
+ XmmRegister t2 = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
+ NearLabel skip_incr, done;
X86_64Assembler* assembler = GetAssembler();
- // Load 0.5 into inPlusPointFive.
- __ movss(inPlusPointFive, codegen_->LiteralFloatAddress(0.5f));
+ // Since no direct x86 rounding instruction matches the required semantics,
+ // this intrinsic is implemented as follows:
+ // result = floor(in);
+ // if (in - result >= 0.5f)
+ // result = result + 1.0f;
+ __ movss(t2, in);
+ __ roundss(t1, in, Immediate(1));
+ __ subss(t2, t1);
+ __ comiss(t2, codegen_->LiteralFloatAddress(0.5f));
+ __ j(kBelow, &skip_incr);
+ __ addss(t1, codegen_->LiteralFloatAddress(1.0f));
+ __ Bind(&skip_incr);
- // Add in the input.
- __ addss(inPlusPointFive, in);
-
- // And truncate to an integer.
- __ roundss(inPlusPointFive, inPlusPointFive, Immediate(1));
-
- // Load maxInt into out.
- codegen_->Load64BitValue(out, kPrimIntMax);
-
- // if inPlusPointFive >= maxInt goto done
- __ comiss(inPlusPointFive, codegen_->LiteralFloatAddress(static_cast<float>(kPrimIntMax)));
- __ j(kAboveEqual, &done);
-
- // if input == NaN goto nan
- __ j(kUnordered, &nan);
-
- // output = float-to-int-truncate(input)
- __ cvttss2si(out, inPlusPointFive);
- __ jmp(&done);
- __ Bind(&nan);
-
- // output = 0
- __ xorl(out, out);
+ // Final conversion to an integer. Unfortunately this also does not have a
+ // direct x86 instruction, since NaN should map to 0 and large positive
+ // values need to be clipped to the extreme value.
+ codegen_->Load32BitValue(out, kPrimIntMax);
+ __ cvtsi2ss(t2, out);
+ __ comiss(t1, t2);
+ __ j(kAboveEqual, &done); // clipped to max (already in out), does not jump on unordered
+ __ movl(out, Immediate(0)); // does not change flags
+ __ j(kUnordered, &done); // NaN mapped to 0 (just moved in out)
+ __ cvttss2si(out, t1);
__ Bind(&done);
}
void IntrinsicLocationsBuilderX86_64::VisitMathRoundDouble(HInvoke* invoke) {
// See intrinsics.h.
- if (kRoundIsPlusPointFive) {
- CreateSSE41FPToIntLocations(arena_, invoke, codegen_);
+ if (!kRoundIsPlusPointFive) {
+ return;
}
+ CreateSSE41FPToIntLocations(arena_, invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitMathRoundDouble(HInvoke* invoke) {
@@ -660,39 +660,36 @@
return;
}
- // Implement RoundDouble as t1 = floor(input + 0.5); convert to long.
XmmRegister in = locations->InAt(0).AsFpuRegister<XmmRegister>();
CpuRegister out = locations->Out().AsRegister<CpuRegister>();
- XmmRegister inPlusPointFive = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
- NearLabel done, nan;
+ XmmRegister t1 = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
+ XmmRegister t2 = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
+ NearLabel skip_incr, done;
X86_64Assembler* assembler = GetAssembler();
- // Load 0.5 into inPlusPointFive.
- __ movsd(inPlusPointFive, codegen_->LiteralDoubleAddress(0.5));
+ // Since no direct x86 rounding instruction matches the required semantics,
+ // this intrinsic is implemented as follows:
+ // result = floor(in);
+ // if (in - result >= 0.5)
+ // result = result + 1.0;
+ __ movsd(t2, in);
+ __ roundsd(t1, in, Immediate(1));
+ __ subsd(t2, t1);
+ __ comisd(t2, codegen_->LiteralDoubleAddress(0.5));
+ __ j(kBelow, &skip_incr);
+ __ addsd(t1, codegen_->LiteralDoubleAddress(1.0f));
+ __ Bind(&skip_incr);
- // Add in the input.
- __ addsd(inPlusPointFive, in);
-
- // And truncate to an integer.
- __ roundsd(inPlusPointFive, inPlusPointFive, Immediate(1));
-
- // Load maxLong into out.
+ // Final conversion to an integer. Unfortunately this also does not have a
+ // direct x86 instruction, since NaN should map to 0 and large positive
+ // values need to be clipped to the extreme value.
codegen_->Load64BitValue(out, kPrimLongMax);
-
- // if inPlusPointFive >= maxLong goto done
- __ comisd(inPlusPointFive, codegen_->LiteralDoubleAddress(static_cast<double>(kPrimLongMax)));
- __ j(kAboveEqual, &done);
-
- // if input == NaN goto nan
- __ j(kUnordered, &nan);
-
- // output = double-to-long-truncate(input)
- __ cvttsd2si(out, inPlusPointFive, /* is64bit */ true);
- __ jmp(&done);
- __ Bind(&nan);
-
- // output = 0
- __ xorl(out, out);
+ __ cvtsi2sd(t2, out, /* is64bit */ true);
+ __ comisd(t1, t2);
+ __ j(kAboveEqual, &done); // clipped to max (already in out), does not jump on unordered
+ __ movl(out, Immediate(0)); // does not change flags, implicit zero extension to 64-bit
+ __ j(kUnordered, &done); // NaN mapped to 0 (just moved in out)
+ __ cvttsd2si(out, t1, /* is64bit */ true);
__ Bind(&done);
}
@@ -1303,7 +1300,7 @@
void IntrinsicLocationsBuilderX86_64::VisitStringCompareTo(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -1577,7 +1574,7 @@
void IntrinsicLocationsBuilderX86_64::VisitStringNewStringFromBytes(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -1634,7 +1631,7 @@
void IntrinsicLocationsBuilderX86_64::VisitStringNewStringFromString(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kCallOnMainAndSlowPath,
kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index 7a78bfd..5fdfb9b 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -376,6 +376,10 @@
return PolicyField::Decode(GetPayload());
}
+ bool RequiresRegisterKind() const {
+ return GetPolicy() == kRequiresRegister || GetPolicy() == kRequiresFpuRegister;
+ }
+
uintptr_t GetEncoding() const {
return GetPayload();
}
@@ -480,6 +484,7 @@
public:
enum CallKind {
kNoCall,
+ kCallOnMainAndSlowPath,
kCallOnSlowPath,
kCallOnMainOnly
};
@@ -540,10 +545,29 @@
Location Out() const { return output_; }
- bool CanCall() const { return call_kind_ != kNoCall; }
- bool WillCall() const { return call_kind_ == kCallOnMainOnly; }
- bool OnlyCallsOnSlowPath() const { return call_kind_ == kCallOnSlowPath; }
- bool NeedsSafepoint() const { return CanCall(); }
+ bool CanCall() const {
+ return call_kind_ != kNoCall;
+ }
+
+ bool WillCall() const {
+ return call_kind_ == kCallOnMainOnly || call_kind_ == kCallOnMainAndSlowPath;
+ }
+
+ bool CallsOnSlowPath() const {
+ return call_kind_ == kCallOnSlowPath || call_kind_ == kCallOnMainAndSlowPath;
+ }
+
+ bool OnlyCallsOnSlowPath() const {
+ return call_kind_ == kCallOnSlowPath;
+ }
+
+ bool CallsOnMainAndSlowPath() const {
+ return call_kind_ == kCallOnMainAndSlowPath;
+ }
+
+ bool NeedsSafepoint() const {
+ return CanCall();
+ }
void SetStackBit(uint32_t index) {
stack_mask_->SetBit(index);
@@ -629,8 +653,7 @@
// Whether these are locations for an intrinsified call.
bool intrinsified_;
- ART_FRIEND_TEST(RegisterAllocatorTest, ExpectedInRegisterHint);
- ART_FRIEND_TEST(RegisterAllocatorTest, SameAsFirstInputHint);
+ friend class RegisterAllocatorTest;
DISALLOW_COPY_AND_ASSIGN(LocationSummary);
};
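
For reference, the four call kinds decompose under these predicates as follows
(derived directly from the definitions above):

    call_kind_              WillCall  CallsOnSlowPath  OnlyCallsOnSlowPath  CallsOnMainAndSlowPath
    kNoCall                 false     false            false                false
    kCallOnSlowPath         false     true             true                 false
    kCallOnMainOnly         true      false            false                false
    kCallOnMainAndSlowPath  true      true             false                true

NeedsSafepoint() is true for every kind except kNoCall.
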
diff --git a/compiler/optimizing/optimization.h b/compiler/optimizing/optimization.h
index 2f59d4c..0819fb0 100644
--- a/compiler/optimizing/optimization.h
+++ b/compiler/optimizing/optimization.h
@@ -37,7 +37,10 @@
virtual ~HOptimization() {}
- // Return the name of the pass.
+ // Return the name of the pass. Pass names for a single HOptimization should be of the form
+ // <optimization_name> or <optimization_name>$<pass_name>, where <optimization_name> is the
+ // common prefix. Example: 'instruction_simplifier', 'instruction_simplifier$after_bce',
+ // 'instruction_simplifier$before_codegen'.
const char* GetPassName() const { return pass_name_; }
// Perform the analysis itself.
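
A minimal illustration of this naming convention, mirroring the kPassNameSeparator
handling added to optimizing_compiler.cc below (sketch only):

    #include <string>

    std::string pass_name = "instruction_simplifier$after_bce";
    size_t pos = pass_name.find('$');
    // Everything before '$' is the optimization name used to construct the pass;
    // the suffix only disambiguates repeated runs in pass lists and dumps.
    std::string opt_name =
        (pos == std::string::npos) ? pass_name : pass_name.substr(0, pos);
    // opt_name == "instruction_simplifier"
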
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index d5b0d77..f7c82d1 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -95,6 +95,8 @@
static constexpr size_t kArenaAllocatorMemoryReportThreshold = 8 * MB;
+static constexpr const char* kPassNameSeparator = "$";
+
/**
* Used by the code generator, to allocate the code in a vector.
*/
@@ -266,7 +268,7 @@
class OptimizingCompiler FINAL : public Compiler {
public:
explicit OptimizingCompiler(CompilerDriver* driver);
- ~OptimizingCompiler();
+ ~OptimizingCompiler() OVERRIDE;
bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file) const OVERRIDE;
@@ -305,17 +307,17 @@
OVERRIDE
SHARED_REQUIRES(Locks::mutator_lock_);
- protected:
- virtual void RunOptimizations(HGraph* graph,
- CodeGenerator* codegen,
- CompilerDriver* driver,
- const DexCompilationUnit& dex_compilation_unit,
- PassObserver* pass_observer,
- StackHandleScopeCollection* handles) const;
+ private:
+ void RunOptimizations(HGraph* graph,
+ CodeGenerator* codegen,
+ CompilerDriver* driver,
+ const DexCompilationUnit& dex_compilation_unit,
+ PassObserver* pass_observer,
+ StackHandleScopeCollection* handles) const;
- virtual void RunOptimizations(HOptimization* optimizations[],
- size_t length,
- PassObserver* pass_observer) const;
+ void RunOptimizations(HOptimization* optimizations[],
+ size_t length,
+ PassObserver* pass_observer) const;
private:
// Create a 'CompiledMethod' for an optimized graph.
@@ -420,6 +422,117 @@
|| instruction_set == kX86_64;
}
+static HOptimization* BuildOptimization(
+ const std::string& opt_name,
+ ArenaAllocator* arena,
+ HGraph* graph,
+ OptimizingCompilerStats* stats,
+ CodeGenerator* codegen,
+ CompilerDriver* driver,
+ const DexCompilationUnit& dex_compilation_unit,
+ StackHandleScopeCollection* handles,
+ SideEffectsAnalysis* most_recent_side_effects,
+ HInductionVarAnalysis* most_recent_induction) {
+ if (opt_name == arm::InstructionSimplifierArm::kInstructionSimplifierArmPassName) {
+ return new (arena) arm::InstructionSimplifierArm(graph, stats);
+ } else if (opt_name == arm64::InstructionSimplifierArm64::kInstructionSimplifierArm64PassName) {
+ return new (arena) arm64::InstructionSimplifierArm64(graph, stats);
+ } else if (opt_name == BoundsCheckElimination::kBoundsCheckEliminationPassName) {
+ CHECK(most_recent_side_effects != nullptr && most_recent_induction != nullptr);
+ return new (arena) BoundsCheckElimination(graph,
+ *most_recent_side_effects,
+ most_recent_induction);
+ } else if (opt_name == GVNOptimization::kGlobalValueNumberingPassName) {
+ CHECK(most_recent_side_effects != nullptr);
+ return new (arena) GVNOptimization(graph, *most_recent_side_effects);
+ } else if (opt_name == HConstantFolding::kConstantFoldingPassName) {
+ return new (arena) HConstantFolding(graph);
+ } else if (opt_name == HDeadCodeElimination::kDeadCodeEliminationPassName) {
+ return new (arena) HDeadCodeElimination(graph, stats);
+ } else if (opt_name == HInliner::kInlinerPassName) {
+ size_t number_of_dex_registers = dex_compilation_unit.GetCodeItem()->registers_size_;
+ return new (arena) HInliner(graph, // outer_graph
+ graph, // outermost_graph
+ codegen,
+ dex_compilation_unit, // outer_compilation_unit
+ dex_compilation_unit, // outermost_compilation_unit
+ driver,
+ handles,
+ stats,
+ number_of_dex_registers,
+ /* depth */ 0);
+ } else if (opt_name == HSharpening::kSharpeningPassName) {
+ return new (arena) HSharpening(graph, codegen, dex_compilation_unit, driver);
+ } else if (opt_name == HSelectGenerator::kSelectGeneratorPassName) {
+ return new (arena) HSelectGenerator(graph, stats);
+ } else if (opt_name == HInductionVarAnalysis::kInductionPassName) {
+ return new (arena) HInductionVarAnalysis(graph);
+ } else if (opt_name == InstructionSimplifier::kInstructionSimplifierPassName) {
+ return new (arena) InstructionSimplifier(graph, stats);
+ } else if (opt_name == IntrinsicsRecognizer::kIntrinsicsRecognizerPassName) {
+ return new (arena) IntrinsicsRecognizer(graph, driver, stats);
+ } else if (opt_name == LICM::kLoopInvariantCodeMotionPassName) {
+ CHECK(most_recent_side_effects != nullptr);
+ return new (arena) LICM(graph, *most_recent_side_effects, stats);
+ } else if (opt_name == LoadStoreElimination::kLoadStoreEliminationPassName) {
+ CHECK(most_recent_side_effects != nullptr);
+ return new (arena) LoadStoreElimination(graph, *most_recent_side_effects);
+ } else if (opt_name == mips::DexCacheArrayFixups::kDexCacheArrayFixupsMipsPassName) {
+ return new (arena) mips::DexCacheArrayFixups(graph, codegen, stats);
+ } else if (opt_name == mips::PcRelativeFixups::kPcRelativeFixupsMipsPassName) {
+ return new (arena) mips::PcRelativeFixups(graph, codegen, stats);
+ } else if (opt_name == SideEffectsAnalysis::kSideEffectsAnalysisPassName) {
+ return new (arena) SideEffectsAnalysis(graph);
+ } else if (opt_name == x86::PcRelativeFixups::kPcRelativeFixupsX86PassName) {
+ return new (arena) x86::PcRelativeFixups(graph, codegen, stats);
+ } else if (opt_name == x86::X86MemoryOperandGeneration::kX86MemoryOperandGenerationPassName) {
+ return new (arena) x86::X86MemoryOperandGeneration(graph, codegen, stats);
+ }
+ return nullptr;
+}
+
+static ArenaVector<HOptimization*> BuildOptimizations(
+ const std::vector<std::string>& pass_names,
+ ArenaAllocator* arena,
+ HGraph* graph,
+ OptimizingCompilerStats* stats,
+ CodeGenerator* codegen,
+ CompilerDriver* driver,
+ const DexCompilationUnit& dex_compilation_unit,
+ StackHandleScopeCollection* handles) {
+ // A few HOptimization constructors require a SideEffectsAnalysis or HInductionVarAnalysis
+ // instance. This method assumes that each of them expects the nearest instance preceding it
+ // in the pass name list.
+ SideEffectsAnalysis* most_recent_side_effects = nullptr;
+ HInductionVarAnalysis* most_recent_induction = nullptr;
+ ArenaVector<HOptimization*> ret(arena->Adapter());
+ for (const std::string& pass_name : pass_names) {
+ size_t pos = pass_name.find(kPassNameSeparator); // Strip suffix to get base pass name.
+ std::string opt_name = pos == std::string::npos ? pass_name : pass_name.substr(0, pos);
+
+ HOptimization* opt = BuildOptimization(
+ opt_name,
+ arena,
+ graph,
+ stats,
+ codegen,
+ driver,
+ dex_compilation_unit,
+ handles,
+ most_recent_side_effects,
+ most_recent_induction);
+ CHECK(opt != nullptr) << "Couldn't build optimization: \"" << pass_name << "\"";
+ ret.push_back(opt);
+
+ if (opt_name == SideEffectsAnalysis::kSideEffectsAnalysisPassName) {
+ most_recent_side_effects = down_cast<SideEffectsAnalysis*>(opt);
+ } else if (opt_name == HInductionVarAnalysis::kInductionPassName) {
+ most_recent_induction = down_cast<HInductionVarAnalysis*>(opt);
+ }
+ }
+ return ret;
+}
+
void OptimizingCompiler::RunOptimizations(HOptimization* optimizations[],
size_t length,
PassObserver* pass_observer) const {
@@ -444,11 +557,11 @@
}
size_t number_of_dex_registers = dex_compilation_unit.GetCodeItem()->registers_size_;
HInliner* inliner = new (graph->GetArena()) HInliner(
- graph,
- graph,
+ graph, // outer_graph
+ graph, // outermost_graph
codegen,
- dex_compilation_unit,
- dex_compilation_unit,
+ dex_compilation_unit, // outer_compilation_unit
+ dex_compilation_unit, // outermost_compilation_unit
driver,
handles,
stats,
@@ -473,7 +586,7 @@
arm::InstructionSimplifierArm* simplifier =
new (arena) arm::InstructionSimplifierArm(graph, stats);
SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
- GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN_after_arch");
+ GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN$after_arch");
HOptimization* arm_optimizations[] = {
simplifier,
side_effects,
@@ -489,7 +602,7 @@
arm64::InstructionSimplifierArm64* simplifier =
new (arena) arm64::InstructionSimplifierArm64(graph, stats);
SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
- GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN_after_arch");
+ GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN$after_arch");
HOptimization* arm64_optimizations[] = {
simplifier,
side_effects,
@@ -518,7 +631,7 @@
x86::PcRelativeFixups* pc_relative_fixups =
new (arena) x86::PcRelativeFixups(graph, codegen, stats);
x86::X86MemoryOperandGeneration* memory_gen =
- new(arena) x86::X86MemoryOperandGeneration(graph, stats, codegen);
+ new (arena) x86::X86MemoryOperandGeneration(graph, codegen, stats);
HOptimization* x86_optimizations[] = {
pc_relative_fixups,
memory_gen
@@ -530,7 +643,7 @@
#ifdef ART_ENABLE_CODEGEN_x86_64
case kX86_64: {
x86::X86MemoryOperandGeneration* memory_gen =
- new(arena) x86::X86MemoryOperandGeneration(graph, stats, codegen);
+ new (arena) x86::X86MemoryOperandGeneration(graph, codegen, stats);
HOptimization* x86_64_optimizations[] = {
memory_gen
};
@@ -546,7 +659,8 @@
NO_INLINE // Avoid increasing caller's frame size by large stack-allocated objects.
static void AllocateRegisters(HGraph* graph,
CodeGenerator* codegen,
- PassObserver* pass_observer) {
+ PassObserver* pass_observer,
+ RegisterAllocator::Strategy strategy) {
{
PassScope scope(PrepareForRegisterAllocation::kPrepareForRegisterAllocationPassName,
pass_observer);
@@ -559,7 +673,7 @@
}
{
PassScope scope(RegisterAllocator::kRegisterAllocatorPassName, pass_observer);
- RegisterAllocator::Create(graph->GetArena(), codegen, liveness)->AllocateRegisters();
+ RegisterAllocator::Create(graph->GetArena(), codegen, liveness, strategy)->AllocateRegisters();
}
}
@@ -571,15 +685,30 @@
StackHandleScopeCollection* handles) const {
OptimizingCompilerStats* stats = compilation_stats_.get();
ArenaAllocator* arena = graph->GetArena();
+ if (driver->GetCompilerOptions().GetPassesToRun() != nullptr) {
+ ArenaVector<HOptimization*> optimizations = BuildOptimizations(
+ *driver->GetCompilerOptions().GetPassesToRun(),
+ arena,
+ graph,
+ stats,
+ codegen,
+ driver,
+ dex_compilation_unit,
+ handles);
+ RunOptimizations(&optimizations[0], optimizations.size(), pass_observer);
+ return;
+ }
+
HDeadCodeElimination* dce1 = new (arena) HDeadCodeElimination(
- graph, stats, HDeadCodeElimination::kInitialDeadCodeEliminationPassName);
+ graph, stats, "dead_code_elimination$initial");
HDeadCodeElimination* dce2 = new (arena) HDeadCodeElimination(
- graph, stats, HDeadCodeElimination::kFinalDeadCodeEliminationPassName);
+ graph, stats, "dead_code_elimination$final");
HConstantFolding* fold1 = new (arena) HConstantFolding(graph);
InstructionSimplifier* simplify1 = new (arena) InstructionSimplifier(graph, stats);
HSelectGenerator* select_generator = new (arena) HSelectGenerator(graph, stats);
- HConstantFolding* fold2 = new (arena) HConstantFolding(graph, "constant_folding_after_inlining");
- HConstantFolding* fold3 = new (arena) HConstantFolding(graph, "constant_folding_after_bce");
+ HConstantFolding* fold2 = new (arena) HConstantFolding(
+ graph, "constant_folding$after_inlining");
+ HConstantFolding* fold3 = new (arena) HConstantFolding(graph, "constant_folding$after_bce");
SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects);
LICM* licm = new (arena) LICM(graph, *side_effects, stats);
@@ -588,9 +717,9 @@
BoundsCheckElimination* bce = new (arena) BoundsCheckElimination(graph, *side_effects, induction);
HSharpening* sharpening = new (arena) HSharpening(graph, codegen, dex_compilation_unit, driver);
InstructionSimplifier* simplify2 = new (arena) InstructionSimplifier(
- graph, stats, "instruction_simplifier_after_bce");
+ graph, stats, "instruction_simplifier$after_bce");
InstructionSimplifier* simplify3 = new (arena) InstructionSimplifier(
- graph, stats, "instruction_simplifier_before_codegen");
+ graph, stats, "instruction_simplifier$before_codegen");
IntrinsicsRecognizer* intrinsics = new (arena) IntrinsicsRecognizer(graph, driver, stats);
HOptimization* optimizations1[] = {
@@ -626,7 +755,6 @@
RunOptimizations(optimizations2, arraysize(optimizations2), pass_observer);
RunArchOptimizations(driver->GetInstructionSet(), graph, codegen, pass_observer);
- AllocateRegisters(graph, codegen, pass_observer);
}
static ArenaVector<LinkerPatch> EmitAndSortLinkerPatches(CodeGenerator* codegen) {
@@ -841,6 +969,10 @@
&pass_observer,
&handles);
+ RegisterAllocator::Strategy regalloc_strategy =
+ compiler_options.GetRegisterAllocationStrategy();
+ AllocateRegisters(graph, codegen.get(), &pass_observer, regalloc_strategy);
+
codegen->Compile(code_allocator);
pass_observer.DumpDisassembly();
}
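
To illustrate the "nearest preceding instance" rule in BuildOptimizations above,
consider a pass list such as (the list itself is invented for illustration; the names
follow the pass-name constants referenced in BuildOptimization):

    side_effects, GVN, induction_var_analysis, BCE, instruction_simplifier$after_bce

GVN is constructed with the SideEffectsAnalysis created just before it, and BCE with
both that SideEffectsAnalysis and the HInductionVarAnalysis immediately preceding it.
If a dependent pass appears with no such preceding analysis, the corresponding CHECK
in BuildOptimization fires.
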
diff --git a/compiler/optimizing/pc_relative_fixups_mips.h b/compiler/optimizing/pc_relative_fixups_mips.h
index 1e8b071..5a7397b 100644
--- a/compiler/optimizing/pc_relative_fixups_mips.h
+++ b/compiler/optimizing/pc_relative_fixups_mips.h
@@ -32,6 +32,8 @@
: HOptimization(graph, "pc_relative_fixups_mips", stats),
codegen_(codegen) {}
+ static constexpr const char* kPcRelativeFixupsMipsPassName = "pc_relative_fixups_mips";
+
void Run() OVERRIDE;
private:
diff --git a/compiler/optimizing/pc_relative_fixups_x86.cc b/compiler/optimizing/pc_relative_fixups_x86.cc
index 921f3df..ad0921d 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.cc
+++ b/compiler/optimizing/pc_relative_fixups_x86.cc
@@ -227,6 +227,7 @@
case Intrinsics::kMathMaxFloatFloat:
case Intrinsics::kMathMinDoubleDouble:
case Intrinsics::kMathMinFloatFloat:
+ case Intrinsics::kMathRoundFloat:
if (!base_added) {
DCHECK(invoke_static_or_direct != nullptr);
DCHECK(!invoke_static_or_direct->HasCurrentMethodInput());
diff --git a/compiler/optimizing/pc_relative_fixups_x86.h b/compiler/optimizing/pc_relative_fixups_x86.h
index 03de2fc..72fa71e 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.h
+++ b/compiler/optimizing/pc_relative_fixups_x86.h
@@ -29,9 +29,11 @@
class PcRelativeFixups : public HOptimization {
public:
PcRelativeFixups(HGraph* graph, CodeGenerator* codegen, OptimizingCompilerStats* stats)
- : HOptimization(graph, "pc_relative_fixups_x86", stats),
+ : HOptimization(graph, kPcRelativeFixupsX86PassName, stats),
codegen_(codegen) {}
+ static constexpr const char* kPcRelativeFixupsX86PassName = "pc_relative_fixups_x86";
+
void Run() OVERRIDE;
private:
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 2367ce1..5b768d5 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -21,6 +21,7 @@
#include "base/bit_vector-inl.h"
#include "code_generator.h"
+#include "register_allocator_graph_color.h"
#include "register_allocator_linear_scan.h"
#include "ssa_liveness_analysis.h"
@@ -41,6 +42,8 @@
switch (strategy) {
case kRegisterAllocatorLinearScan:
return new (allocator) RegisterAllocatorLinearScan(allocator, codegen, analysis);
+ case kRegisterAllocatorGraphColor:
+ return new (allocator) RegisterAllocatorGraphColor(allocator, codegen, analysis);
default:
LOG(FATAL) << "Invalid register allocation strategy: " << strategy;
UNREACHABLE();
@@ -163,6 +166,19 @@
} else {
codegen.DumpFloatingPointRegister(message, current->GetRegister());
}
+ for (LiveInterval* interval : intervals) {
+ if (interval->HasRegister()
+ && interval->GetRegister() == current->GetRegister()
+ && interval->CoversSlow(j)) {
+ message << std::endl;
+ if (interval->GetDefinedBy() != nullptr) {
+ message << interval->GetDefinedBy()->GetKind() << " ";
+ } else {
+ message << "physical ";
+ }
+ interval->Dump(message);
+ }
+ }
LOG(FATAL) << message.str();
} else {
return false;
diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h
index 729eede..7e1fff8 100644
--- a/compiler/optimizing/register_allocator.h
+++ b/compiler/optimizing/register_allocator.h
@@ -40,7 +40,8 @@
class RegisterAllocator : public ArenaObject<kArenaAllocRegisterAllocator> {
public:
enum Strategy {
- kRegisterAllocatorLinearScan
+ kRegisterAllocatorLinearScan,
+ kRegisterAllocatorGraphColor
};
static constexpr Strategy kRegisterAllocatorDefault = kRegisterAllocatorLinearScan;
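
Selecting the new strategy goes through the factory shown in register_allocator.cc
above; an illustrative call site, mirroring AllocateRegisters in
optimizing_compiler.cc:

    RegisterAllocator* allocator = RegisterAllocator::Create(
        graph->GetArena(), codegen, liveness, RegisterAllocator::kRegisterAllocatorGraphColor);
    allocator->AllocateRegisters();
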
diff --git a/compiler/optimizing/register_allocator_graph_color.cc b/compiler/optimizing/register_allocator_graph_color.cc
new file mode 100644
index 0000000..79ca5a0
--- /dev/null
+++ b/compiler/optimizing/register_allocator_graph_color.cc
@@ -0,0 +1,1012 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "register_allocator_graph_color.h"
+
+#include "code_generator.h"
+#include "register_allocation_resolver.h"
+#include "ssa_liveness_analysis.h"
+#include "thread-inl.h"
+
+namespace art {
+
+// Highest number of registers that we support for any platform. This can be used for std::bitset,
+// for example, which needs to know its size at compile time.
+static constexpr size_t kMaxNumRegs = 32;
+
+// The maximum number of graph coloring attempts before triggering a DCHECK.
+// This is meant to catch changes to the graph coloring algorithm that undermine its forward
+// progress guarantees. Forward progress for the algorithm means splitting live intervals on
+// every graph coloring attempt so that eventually the interference graph will be sparse enough
+// to color. The main threat to forward progress is trying to split short intervals which cannot be
+// split further; this could cause infinite looping because the interference graph would never
+// change. This is avoided by prioritizing short intervals before long ones, so that long
+// intervals are split when coloring fails.
+static constexpr size_t kMaxGraphColoringAttemptsDebug = 100;
+
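
The forward-progress comment above implies a driver loop of roughly this shape (a
sketch with invented phase names; the real AllocateRegisters appears later in this
file, beyond this excerpt):

    size_t attempt = 0;
    while (true) {
      ++attempt;
      DCHECK_LE(attempt, kMaxGraphColoringAttemptsDebug);
      BuildInterferenceGraph(intervals, &interference_graph);  // Overlaps become edges.
      PruneInterferenceGraph(&interference_graph, num_regs, &pruned_stack);
      if (ColorInterferenceGraph(&pruned_stack, num_regs)) {
        break;  // Every interval got a register or was spilled for good.
      }
      // A failed attempt split some live intervals, so the next interference
      // graph is strictly sparser and eventually becomes colorable.
    }
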
+// Interference nodes make up the interference graph, which is the primary data structure in
+// graph coloring register allocation. Each node represents a single live interval, and contains
+// a set of adjacent nodes corresponding to intervals overlapping with its own. To save memory,
+// pre-colored nodes never contain outgoing edges (only incoming ones).
+//
+// As nodes are pruned from the interference graph, incoming edges of the pruned node are removed,
+// but outgoing edges remain in order to later color the node based on the colors of its neighbors.
+//
+// Note that a pair interval is represented by a single node in the interference graph, which
+// essentially requires two colors. One consequence of this is that the degree of a node is not
+// necessarily equal to the number of adjacent nodes--instead, the degree reflects the maximum
+// number of colors with which a node could interfere. We model this by giving edges different
+// weights (1 or 2) to control how much it increases the degree of adjacent nodes.
+// For example, the edge between two single nodes will have weight 1. On the other hand,
+// the edge between a single node and a pair node will have weight 2. This is because the pair
+// node could block up to two colors for the single node, and because the single node could
+// block an entire two-register aligned slot for the pair node.
+// The degree is defined this way because we use it to decide whether a node is guaranteed a color,
+// and thus whether it is safe to prune it from the interference graph early on.
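+//
+// For example, if a pair node P is adjacent to single nodes A and B, the P--A and P--B edges
+// each have weight 2. P's out-degree is then 4, while A and B each have out-degree 2,
+// since P alone could block two of their candidate colors.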
+class InterferenceNode : public ArenaObject<kArenaAllocRegisterAllocator> {
+ public:
+ InterferenceNode(ArenaAllocator* allocator, LiveInterval* interval, size_t id)
+ : interval_(interval),
+ adjacent_nodes_(CmpPtr, allocator->Adapter(kArenaAllocRegisterAllocator)),
+ out_degree_(0),
+ id_(id) {}
+
+ // Used to maintain determinism when storing InterferenceNode pointers in sets.
+ static bool CmpPtr(const InterferenceNode* lhs, const InterferenceNode* rhs) {
+ return lhs->id_ < rhs->id_;
+ }
+
+ void AddInterference(InterferenceNode* other) {
+ if (adjacent_nodes_.insert(other).second) {
+ out_degree_ += EdgeWeightWith(other);
+ }
+ }
+
+ void RemoveInterference(InterferenceNode* other) {
+ if (adjacent_nodes_.erase(other) > 0) {
+ out_degree_ -= EdgeWeightWith(other);
+ }
+ }
+
+ bool ContainsInterference(InterferenceNode* other) const {
+ return adjacent_nodes_.count(other) > 0;
+ }
+
+ LiveInterval* GetInterval() const {
+ return interval_;
+ }
+
+ const ArenaSet<InterferenceNode*, decltype(&CmpPtr)>& GetAdjacentNodes() const {
+ return adjacent_nodes_;
+ }
+
+ size_t GetOutDegree() const {
+ return out_degree_;
+ }
+
+ size_t GetId() const {
+ return id_;
+ }
+
+ private:
+ // We give extra weight to edges adjacent to pair nodes. See the general comment on the
+ // interference graph above.
+ size_t EdgeWeightWith(InterferenceNode* other) const {
+ return (interval_->HasHighInterval() || other->interval_->HasHighInterval()) ? 2 : 1;
+ }
+
+ // The live interval that this node represents.
+ LiveInterval* const interval_;
+
+ // All nodes interfering with this one.
+ // TODO: There is potential to use a cheaper data structure here, especially since
+ // adjacency sets will usually be small.
+ ArenaSet<InterferenceNode*, decltype(&CmpPtr)> adjacent_nodes_;
+
+ // The maximum number of colors with which this node could interfere. This could be more than
+ // the number of adjacent nodes if this is a pair node, or if some adjacent nodes are pair nodes.
+ // We use "out" degree because incoming edges come from nodes already pruned from the graph,
+ // and do not affect the coloring of this node.
+ size_t out_degree_;
+
+ // A unique identifier for this node, used to maintain determinism when storing
+ // interference nodes in sets.
+ const size_t id_;
+
+ // TODO: We could cache the result of interval_->RequiresRegister(), since it
+ // will not change for the lifetime of this node. (Currently, RequiresRegister() requires
+ // iterating through all uses of a live interval.)
+
+ DISALLOW_COPY_AND_ASSIGN(InterferenceNode);
+};
+
+static bool IsCoreInterval(LiveInterval* interval) {
+ return interval->GetType() != Primitive::kPrimFloat
+ && interval->GetType() != Primitive::kPrimDouble;
+}
+
+static size_t ComputeReservedArtMethodSlots(const CodeGenerator& codegen) {
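+  // For example, on a 64-bit target: an 8-byte method pointer / 4-byte vregs = 2 slots.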
+ return static_cast<size_t>(InstructionSetPointerSize(codegen.GetInstructionSet())) / kVRegSize;
+}
+
+RegisterAllocatorGraphColor::RegisterAllocatorGraphColor(ArenaAllocator* allocator,
+ CodeGenerator* codegen,
+ const SsaLivenessAnalysis& liveness)
+ : RegisterAllocator(allocator, codegen, liveness),
+ core_intervals_(allocator->Adapter(kArenaAllocRegisterAllocator)),
+ fp_intervals_(allocator->Adapter(kArenaAllocRegisterAllocator)),
+ temp_intervals_(allocator->Adapter(kArenaAllocRegisterAllocator)),
+ safepoints_(allocator->Adapter(kArenaAllocRegisterAllocator)),
+ physical_core_intervals_(allocator->Adapter(kArenaAllocRegisterAllocator)),
+ physical_fp_intervals_(allocator->Adapter(kArenaAllocRegisterAllocator)),
+ int_spill_slot_counter_(0),
+ double_spill_slot_counter_(0),
+ float_spill_slot_counter_(0),
+ long_spill_slot_counter_(0),
+ catch_phi_spill_slot_counter_(0),
+ reserved_art_method_slots_(ComputeReservedArtMethodSlots(*codegen)),
+ reserved_out_slots_(codegen->GetGraph()->GetMaximumNumberOfOutVRegs()),
+ number_of_globally_blocked_core_regs_(0),
+ number_of_globally_blocked_fp_regs_(0),
+ max_safepoint_live_core_regs_(0),
+ max_safepoint_live_fp_regs_(0),
+ coloring_attempt_allocator_(nullptr) {
+ // Before we ask for blocked registers, set them up in the code generator.
+ codegen->SetupBlockedRegisters();
+
+ // Initialize physical core register live intervals and blocked registers.
+ // This includes globally blocked registers, such as the stack pointer.
+ physical_core_intervals_.resize(codegen->GetNumberOfCoreRegisters(), nullptr);
+ for (size_t i = 0; i < codegen->GetNumberOfCoreRegisters(); ++i) {
+ LiveInterval* interval = LiveInterval::MakeFixedInterval(allocator_, i, Primitive::kPrimInt);
+ physical_core_intervals_[i] = interval;
+ core_intervals_.push_back(interval);
+ if (codegen_->IsBlockedCoreRegister(i)) {
+ ++number_of_globally_blocked_core_regs_;
+ interval->AddRange(0, liveness.GetMaxLifetimePosition());
+ }
+ }
+ // Initialize physical floating point register live intervals and blocked registers.
+ physical_fp_intervals_.resize(codegen->GetNumberOfFloatingPointRegisters(), nullptr);
+ for (size_t i = 0; i < codegen->GetNumberOfFloatingPointRegisters(); ++i) {
+ LiveInterval* interval = LiveInterval::MakeFixedInterval(allocator_, i, Primitive::kPrimFloat);
+ physical_fp_intervals_[i] = interval;
+ fp_intervals_.push_back(interval);
+ if (codegen_->IsBlockedFloatingPointRegister(i)) {
+ ++number_of_globally_blocked_fp_regs_;
+ interval->AddRange(0, liveness.GetMaxLifetimePosition());
+ }
+ }
+}
+
+void RegisterAllocatorGraphColor::AllocateRegisters() {
+ // (1) Collect and prepare live intervals.
+ ProcessInstructions();
+
+ for (bool processing_core_regs : {true, false}) {
+ ArenaVector<LiveInterval*>& intervals = processing_core_regs
+ ? core_intervals_
+ : fp_intervals_;
+ size_t num_registers = processing_core_regs
+ ? codegen_->GetNumberOfCoreRegisters()
+ : codegen_->GetNumberOfFloatingPointRegisters();
+
+ size_t attempt = 0;
+ while (true) {
+ ++attempt;
+      DCHECK_LE(attempt, kMaxGraphColoringAttemptsDebug)
+ << "Exceeded debug max graph coloring register allocation attempts. "
+ << "This could indicate that the register allocator is not making forward progress, "
+ << "which could be caused by prioritizing the wrong live intervals. (Short intervals "
+ << "should be prioritized over long ones, because they cannot be split further.)";
+
+ // Reset the allocator for the next coloring attempt.
+ ArenaAllocator coloring_attempt_allocator(allocator_->GetArenaPool());
+ coloring_attempt_allocator_ = &coloring_attempt_allocator;
+
+ // (2) Build the interference graph.
+ ArenaVector<InterferenceNode*> prunable_nodes(
+ coloring_attempt_allocator_->Adapter(kArenaAllocRegisterAllocator));
+ ArenaVector<InterferenceNode*> safepoints(
+ coloring_attempt_allocator_->Adapter(kArenaAllocRegisterAllocator));
+ BuildInterferenceGraph(intervals, &prunable_nodes, &safepoints);
+
+ // (3) Prune all uncolored nodes from interference graph.
+ ArenaStdStack<InterferenceNode*> pruned_nodes(
+ coloring_attempt_allocator_->Adapter(kArenaAllocRegisterAllocator));
+ PruneInterferenceGraph(prunable_nodes, num_registers, &pruned_nodes);
+
+ // (4) Color pruned nodes based on interferences.
+ bool successful = ColorInterferenceGraph(&pruned_nodes, num_registers);
+
+ if (successful) {
+ // Compute the maximum number of live registers across safepoints.
+ // Notice that we do not count globally blocked registers, such as the stack pointer.
+ if (safepoints.size() > 0) {
+ size_t max_safepoint_live_regs = ComputeMaxSafepointLiveRegisters(safepoints);
+ if (processing_core_regs) {
+ max_safepoint_live_core_regs_ =
+ max_safepoint_live_regs - number_of_globally_blocked_core_regs_;
+ } else {
+          max_safepoint_live_fp_regs_ =
+ max_safepoint_live_regs - number_of_globally_blocked_fp_regs_;
+ }
+ }
+
+ // Tell the code generator which registers were allocated.
+ // We only look at prunable_nodes because we already told the code generator about
+ // fixed intervals while processing instructions. We also ignore the fixed intervals
+ // placed at the top of catch blocks.
+ for (InterferenceNode* node : prunable_nodes) {
+ LiveInterval* interval = node->GetInterval();
+ if (interval->HasRegister()) {
+ Location low_reg = processing_core_regs
+ ? Location::RegisterLocation(interval->GetRegister())
+ : Location::FpuRegisterLocation(interval->GetRegister());
+ codegen_->AddAllocatedRegister(low_reg);
+ if (interval->HasHighInterval()) {
+ LiveInterval* high = interval->GetHighInterval();
+ DCHECK(high->HasRegister());
+ Location high_reg = processing_core_regs
+ ? Location::RegisterLocation(high->GetRegister())
+ : Location::FpuRegisterLocation(high->GetRegister());
+ codegen_->AddAllocatedRegister(high_reg);
+ }
+ } else {
+ DCHECK(!interval->HasHighInterval() || !interval->GetHighInterval()->HasRegister());
+ }
+ }
+
+ break;
+ }
+ } // while unsuccessful
+  } // for processing_core_regs
+
+ // (5) Resolve locations and deconstruct SSA form.
+ RegisterAllocationResolver(allocator_, codegen_, liveness_)
+ .Resolve(max_safepoint_live_core_regs_,
+ max_safepoint_live_fp_regs_,
+ reserved_art_method_slots_ + reserved_out_slots_,
+ int_spill_slot_counter_,
+ long_spill_slot_counter_,
+ float_spill_slot_counter_,
+ double_spill_slot_counter_,
+ catch_phi_spill_slot_counter_,
+ temp_intervals_);
+
+ if (kIsDebugBuild) {
+ Validate(/*log_fatal_on_failure*/ true);
+ }
+}
+
+bool RegisterAllocatorGraphColor::Validate(bool log_fatal_on_failure) {
+ for (bool processing_core_regs : {true, false}) {
+ ArenaVector<LiveInterval*> intervals(
+ allocator_->Adapter(kArenaAllocRegisterAllocatorValidate));
+ for (size_t i = 0; i < liveness_.GetNumberOfSsaValues(); ++i) {
+ HInstruction* instruction = liveness_.GetInstructionFromSsaIndex(i);
+ LiveInterval* interval = instruction->GetLiveInterval();
+ if (interval != nullptr && IsCoreInterval(interval) == processing_core_regs) {
+ intervals.push_back(instruction->GetLiveInterval());
+ }
+ }
+
+ ArenaVector<LiveInterval*>& physical_intervals = processing_core_regs
+ ? physical_core_intervals_
+ : physical_fp_intervals_;
+ for (LiveInterval* fixed : physical_intervals) {
+ if (fixed->GetFirstRange() != nullptr) {
+ // Ideally we would check fixed ranges as well, but currently there are times when
+ // two fixed intervals for the same register will overlap. For example, a fixed input
+        // and a fixed output may sometimes share the same register, in which case there will be two
+ // fixed intervals for the same place.
+ }
+ }
+
+ for (LiveInterval* temp : temp_intervals_) {
+ if (IsCoreInterval(temp) == processing_core_regs) {
+ intervals.push_back(temp);
+ }
+ }
+
+ size_t spill_slots = int_spill_slot_counter_
+ + long_spill_slot_counter_
+ + float_spill_slot_counter_
+ + double_spill_slot_counter_
+ + catch_phi_spill_slot_counter_;
+ bool ok = ValidateIntervals(intervals,
+ spill_slots,
+ reserved_art_method_slots_ + reserved_out_slots_,
+ *codegen_,
+ allocator_,
+ processing_core_regs,
+ log_fatal_on_failure);
+ if (!ok) {
+ return false;
+ }
+ } // for processing_core_regs
+
+ return true;
+}
+
+void RegisterAllocatorGraphColor::ProcessInstructions() {
+ for (HLinearPostOrderIterator it(*codegen_->GetGraph()); !it.Done(); it.Advance()) {
+ HBasicBlock* block = it.Current();
+
+ // Note that we currently depend on this ordering, since some helper
+ // code is designed for linear scan register allocation.
+ for (HBackwardInstructionIterator instr_it(block->GetInstructions());
+ !instr_it.Done();
+ instr_it.Advance()) {
+ ProcessInstruction(instr_it.Current());
+ }
+
+ for (HInstructionIterator phi_it(block->GetPhis()); !phi_it.Done(); phi_it.Advance()) {
+ ProcessInstruction(phi_it.Current());
+ }
+
+    if (block->IsCatchBlock() ||
+        (block->IsLoopHeader() && block->GetLoopInformation()->IsIrreducible())) {
+ // By blocking all registers at the top of each catch block or irreducible loop, we force
+ // intervals belonging to the live-in set of the catch/header block to be spilled.
+      // TODO(ngeoffray): Phis in this block could be allocated in registers.
+ size_t position = block->GetLifetimeStart();
+ BlockRegisters(position, position + 1);
+ }
+ }
+}
+
+void RegisterAllocatorGraphColor::ProcessInstruction(HInstruction* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ if (locations == nullptr) {
+ return;
+ }
+ if (locations->NeedsSafepoint() && codegen_->IsLeafMethod()) {
+ // We do this here because we do not want the suspend check to artificially
+ // create live registers.
+ DCHECK(instruction->IsSuspendCheckEntry());
+ DCHECK_EQ(locations->GetTempCount(), 0u);
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ return;
+ }
+
+ CheckForTempLiveIntervals(instruction);
+ CheckForSafepoint(instruction);
+ if (instruction->GetLocations()->WillCall()) {
+ // If a call will happen, create fixed intervals for caller-save registers.
+ // TODO: Note that it may be beneficial to later split intervals at this point,
+ // so that we allow last-minute moves from a caller-save register
+ // to a callee-save register.
+ BlockRegisters(instruction->GetLifetimePosition(),
+ instruction->GetLifetimePosition() + 1,
+ /*caller_save_only*/ true);
+ }
+ CheckForFixedInputs(instruction);
+
+ LiveInterval* interval = instruction->GetLiveInterval();
+ if (interval == nullptr) {
+ // Instructions lacking a valid output location do not have a live interval.
+ DCHECK(!locations->Out().IsValid());
+ return;
+ }
+
+ // Low intervals act as representatives for their corresponding high interval.
+ DCHECK(!interval->IsHighInterval());
+ if (codegen_->NeedsTwoRegisters(interval->GetType())) {
+ interval->AddHighInterval();
+ }
+ AddSafepointsFor(instruction);
+ CheckForFixedOutput(instruction);
+ AllocateSpillSlotForCatchPhi(instruction);
+
+ ArenaVector<LiveInterval*>& intervals = IsCoreInterval(interval)
+ ? core_intervals_
+ : fp_intervals_;
+ if (interval->HasSpillSlot() || instruction->IsConstant()) {
+ // Note that if an interval already has a spill slot, then its value currently resides
+ // in the stack (e.g., parameters). Thus we do not have to allocate a register until its first
+ // register use. This is also true for constants, which can be materialized at any point.
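+    // For example, an argument passed on the stack keeps its stack location until a use
+    // that actually demands a register; only the split interval starting near that use
+    // competes for registers.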
+ size_t first_register_use = interval->FirstRegisterUse();
+ if (first_register_use != kNoLifetime) {
+ LiveInterval* split = SplitBetween(interval, interval->GetStart(), first_register_use - 1);
+ intervals.push_back(split);
+ } else {
+ // We won't allocate a register for this value.
+ }
+ } else {
+ intervals.push_back(interval);
+ }
+}
+
+void RegisterAllocatorGraphColor::CheckForFixedInputs(HInstruction* instruction) {
+ // We simply block physical registers where necessary.
+ // TODO: Ideally we would coalesce the physical register with the register
+ // allocated to the input value, but this can be tricky if, e.g., there
+ // could be multiple physical register uses of the same value at the
+ // same instruction. Need to think about it more.
+ LocationSummary* locations = instruction->GetLocations();
+ size_t position = instruction->GetLifetimePosition();
+ for (size_t i = 0; i < locations->GetInputCount(); ++i) {
+ Location input = locations->InAt(i);
+ if (input.IsRegister() || input.IsFpuRegister()) {
+ BlockRegister(input, position, position + 1);
+ codegen_->AddAllocatedRegister(input);
+ } else if (input.IsPair()) {
+ BlockRegister(input.ToLow(), position, position + 1);
+ BlockRegister(input.ToHigh(), position, position + 1);
+ codegen_->AddAllocatedRegister(input.ToLow());
+ codegen_->AddAllocatedRegister(input.ToHigh());
+ }
+ }
+}
+
+void RegisterAllocatorGraphColor::CheckForFixedOutput(HInstruction* instruction) {
+ // If an instruction has a fixed output location, we give the live interval a register and then
+ // proactively split it just after the definition point to avoid creating too many interferences
+ // with a fixed node.
+ LiveInterval* interval = instruction->GetLiveInterval();
+ Location out = interval->GetDefinedBy()->GetLocations()->Out();
+ size_t position = instruction->GetLifetimePosition();
+ DCHECK_GE(interval->GetEnd() - position, 2u);
+
+ if (out.IsUnallocated() && out.GetPolicy() == Location::kSameAsFirstInput) {
+ out = instruction->GetLocations()->InAt(0);
+ }
+
+ if (out.IsRegister() || out.IsFpuRegister()) {
+ interval->SetRegister(out.reg());
+ codegen_->AddAllocatedRegister(out);
+ Split(interval, position + 1);
+ } else if (out.IsPair()) {
+ interval->SetRegister(out.low());
+ interval->GetHighInterval()->SetRegister(out.high());
+ codegen_->AddAllocatedRegister(out.ToLow());
+ codegen_->AddAllocatedRegister(out.ToHigh());
+ Split(interval, position + 1);
+ } else if (out.IsStackSlot() || out.IsDoubleStackSlot()) {
+ interval->SetSpillSlot(out.GetStackIndex());
+ } else {
+ DCHECK(out.IsUnallocated() || out.IsConstant());
+ }
+}
+
+void RegisterAllocatorGraphColor::AddSafepointsFor(HInstruction* instruction) {
+ LiveInterval* interval = instruction->GetLiveInterval();
+ for (size_t safepoint_index = safepoints_.size(); safepoint_index > 0; --safepoint_index) {
+ HInstruction* safepoint = safepoints_[safepoint_index - 1u];
+ size_t safepoint_position = safepoint->GetLifetimePosition();
+
+ // Test that safepoints_ are ordered in the optimal way.
+ DCHECK(safepoint_index == safepoints_.size() ||
+ safepoints_[safepoint_index]->GetLifetimePosition() < safepoint_position);
+
+ if (safepoint_position == interval->GetStart()) {
+ // The safepoint is for this instruction, so the location of the instruction
+ // does not need to be saved.
+ DCHECK_EQ(safepoint_index, safepoints_.size());
+ DCHECK_EQ(safepoint, instruction);
+ continue;
+ } else if (interval->IsDeadAt(safepoint_position)) {
+ break;
+ } else if (!interval->Covers(safepoint_position)) {
+ // Hole in the interval.
+ continue;
+ }
+ interval->AddSafepoint(safepoint);
+ }
+}
+
+void RegisterAllocatorGraphColor::CheckForTempLiveIntervals(HInstruction* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ size_t position = instruction->GetLifetimePosition();
+ for (size_t i = 0; i < locations->GetTempCount(); ++i) {
+ Location temp = locations->GetTemp(i);
+ if (temp.IsRegister() || temp.IsFpuRegister()) {
+ BlockRegister(temp, position, position + 1);
+ codegen_->AddAllocatedRegister(temp);
+ } else {
+ DCHECK(temp.IsUnallocated());
+ switch (temp.GetPolicy()) {
+ case Location::kRequiresRegister: {
+ LiveInterval* interval =
+ LiveInterval::MakeTempInterval(allocator_, Primitive::kPrimInt);
+ interval->AddTempUse(instruction, i);
+ core_intervals_.push_back(interval);
+ temp_intervals_.push_back(interval);
+ break;
+ }
+
+ case Location::kRequiresFpuRegister: {
+ LiveInterval* interval =
+ LiveInterval::MakeTempInterval(allocator_, Primitive::kPrimDouble);
+ interval->AddTempUse(instruction, i);
+ fp_intervals_.push_back(interval);
+ temp_intervals_.push_back(interval);
+ if (codegen_->NeedsTwoRegisters(Primitive::kPrimDouble)) {
+ interval->AddHighInterval(/*is_temp*/ true);
+ temp_intervals_.push_back(interval->GetHighInterval());
+ }
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected policy for temporary location "
+ << temp.GetPolicy();
+ }
+ }
+ }
+}
+
+void RegisterAllocatorGraphColor::CheckForSafepoint(HInstruction* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ size_t position = instruction->GetLifetimePosition();
+
+ if (locations->NeedsSafepoint()) {
+ safepoints_.push_back(instruction);
+ if (locations->OnlyCallsOnSlowPath()) {
+ // We add a synthesized range at this position to record the live registers
+ // at this position. Ideally, we could just update the safepoints when locations
+ // are updated, but we currently need to know the full stack size before updating
+ // locations (because of parameters and the fact that we don't have a frame pointer).
+      // And knowing the full stack size requires knowing the maximum number of live
+ // registers at calls in slow paths.
+ // By adding the following interval in the algorithm, we can compute this
+ // maximum before updating locations.
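+      // For example, an instruction that calls into the runtime only on its slow path,
+      // such as a null check, gets a one-position interval here; the safepoint node then
+      // records an edge to every interval live across it, which is what
+      // ComputeMaxSafepointLiveRegisters counts.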
+ LiveInterval* interval = LiveInterval::MakeSlowPathInterval(allocator_, instruction);
+ interval->AddRange(position, position + 1);
+ core_intervals_.push_back(interval);
+ fp_intervals_.push_back(interval);
+ }
+ }
+}
+
+LiveInterval* RegisterAllocatorGraphColor::TrySplit(LiveInterval* interval, size_t position) {
+ if (interval->GetStart() < position && position < interval->GetEnd()) {
+ return Split(interval, position);
+ } else {
+ return interval;
+ }
+}
+
+void RegisterAllocatorGraphColor::SplitAtRegisterUses(LiveInterval* interval) {
+ DCHECK(!interval->IsHighInterval());
+
+ // Split just after a register definition.
+ if (interval->IsParent() && interval->DefinitionRequiresRegister()) {
+ interval = TrySplit(interval, interval->GetStart() + 1);
+ }
+
+ UsePosition* use = interval->GetFirstUse();
+ while (use != nullptr && use->GetPosition() < interval->GetStart()) {
+ use = use->GetNext();
+ }
+
+ // Split around register uses.
+ size_t end = interval->GetEnd();
+ while (use != nullptr && use->GetPosition() <= end) {
+ if (use->RequiresRegister()) {
+ size_t position = use->GetPosition();
+ interval = TrySplit(interval, position - 1);
+ if (liveness_.GetInstructionFromPosition(position / 2)->IsControlFlow()) {
+ // If we are at the very end of a basic block, we cannot split right
+ // at the use. Split just after instead.
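+        // (A connecting move could not be placed after the control-flow instruction
+        // that ends the block.)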
+ interval = TrySplit(interval, position + 1);
+ } else {
+ interval = TrySplit(interval, position);
+ }
+ }
+ use = use->GetNext();
+ }
+}
+
+void RegisterAllocatorGraphColor::AllocateSpillSlotForCatchPhi(HInstruction* instruction) {
+ if (instruction->IsPhi() && instruction->AsPhi()->IsCatchPhi()) {
+ HPhi* phi = instruction->AsPhi();
+ LiveInterval* interval = phi->GetLiveInterval();
+
+ HInstruction* previous_phi = phi->GetPrevious();
+ DCHECK(previous_phi == nullptr ||
+ previous_phi->AsPhi()->GetRegNumber() <= phi->GetRegNumber())
+ << "Phis expected to be sorted by vreg number, "
+ << "so that equivalent phis are adjacent.";
+
+ if (phi->IsVRegEquivalentOf(previous_phi)) {
+ // Assign the same spill slot.
+ DCHECK(previous_phi->GetLiveInterval()->HasSpillSlot());
+ interval->SetSpillSlot(previous_phi->GetLiveInterval()->GetSpillSlot());
+ } else {
+ interval->SetSpillSlot(catch_phi_spill_slot_counter_);
+ catch_phi_spill_slot_counter_ += interval->NeedsTwoSpillSlots() ? 2 : 1;
+ }
+ }
+}
+
+void RegisterAllocatorGraphColor::BlockRegister(Location location,
+ size_t start,
+ size_t end) {
+ DCHECK(location.IsRegister() || location.IsFpuRegister());
+ int reg = location.reg();
+ LiveInterval* interval = location.IsRegister()
+ ? physical_core_intervals_[reg]
+ : physical_fp_intervals_[reg];
+  DCHECK_EQ(interval->GetRegister(), reg);
+ bool blocked_by_codegen = location.IsRegister()
+ ? codegen_->IsBlockedCoreRegister(reg)
+ : codegen_->IsBlockedFloatingPointRegister(reg);
+ if (blocked_by_codegen) {
+ // We've already blocked this register for the entire method. (And adding a
+    // range inside another range violates the preconditions of AddRange.)
+ } else {
+ interval->AddRange(start, end);
+ }
+}
+
+void RegisterAllocatorGraphColor::BlockRegisters(size_t start, size_t end, bool caller_save_only) {
+ for (size_t i = 0; i < codegen_->GetNumberOfCoreRegisters(); ++i) {
+ if (!caller_save_only || !codegen_->IsCoreCalleeSaveRegister(i)) {
+ BlockRegister(Location::RegisterLocation(i), start, end);
+ }
+ }
+ for (size_t i = 0; i < codegen_->GetNumberOfFloatingPointRegisters(); ++i) {
+ if (!caller_save_only || !codegen_->IsFloatingPointCalleeSaveRegister(i)) {
+ BlockRegister(Location::FpuRegisterLocation(i), start, end);
+ }
+ }
+}
+
+// Add an interference edge, but only if necessary.
+static void AddPotentialInterference(InterferenceNode* from, InterferenceNode* to) {
+ if (from->GetInterval()->HasRegister()) {
+ // We save space by ignoring outgoing edges from fixed nodes.
+ } else if (to->GetInterval()->IsSlowPathSafepoint()) {
+ // Safepoint intervals are only there to count max live registers,
+ // so no need to give them incoming interference edges.
+ // This is also necessary for correctness, because we don't want nodes
+ // to remove themselves from safepoint adjacency sets when they're pruned.
+ } else {
+ from->AddInterference(to);
+ }
+}
+
+// TODO: See locations->OutputCanOverlapWithInputs(); we may want to consider
+// this when building the interference graph.
+void RegisterAllocatorGraphColor::BuildInterferenceGraph(
+ const ArenaVector<LiveInterval*>& intervals,
+ ArenaVector<InterferenceNode*>* prunable_nodes,
+ ArenaVector<InterferenceNode*>* safepoints) {
+ size_t interval_id_counter = 0;
+
+ // Build the interference graph efficiently by ordering range endpoints
+ // by position and doing a linear sweep to find interferences. (That is, we
+ // jump from endpoint to endpoint, maintaining a set of intervals live at each
+ // point. If two nodes are ever in the live set at the same time, then they
+ // interfere with each other.)
+ //
+ // We order by both position and (secondarily) by whether the endpoint
+ // begins or ends a range; we want to process range endings before range
+ // beginnings at the same position because they should not conflict.
+ //
+ // For simplicity, we create a tuple for each endpoint, and then sort the tuples.
+ // Tuple contents: (position, is_range_beginning, node).
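+  //
+  // For example, intervals A = [2, 6) and B = [4, 8) yield the sorted tuples
+  // (2, true, A), (4, true, B), (6, false, A), (8, false, B); when B's range begins
+  // at position 4, A is still in the live set, so A and B interfere.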
+ ArenaVector<std::tuple<size_t, bool, InterferenceNode*>> range_endpoints(
+ coloring_attempt_allocator_->Adapter(kArenaAllocRegisterAllocator));
+ for (LiveInterval* parent : intervals) {
+ for (LiveInterval* sibling = parent; sibling != nullptr; sibling = sibling->GetNextSibling()) {
+ LiveRange* range = sibling->GetFirstRange();
+ if (range != nullptr) {
+ InterferenceNode* node = new (coloring_attempt_allocator_) InterferenceNode(
+ coloring_attempt_allocator_, sibling, interval_id_counter++);
+ if (sibling->HasRegister()) {
+ // Fixed nodes will never be pruned, so no need to keep track of them.
+ } else if (sibling->IsSlowPathSafepoint()) {
+ // Safepoint intervals are synthesized to count max live registers.
+ // They will be processed separately after coloring.
+ safepoints->push_back(node);
+ } else {
+ prunable_nodes->push_back(node);
+ }
+
+ while (range != nullptr) {
+ range_endpoints.push_back(std::make_tuple(range->GetStart(), true, node));
+ range_endpoints.push_back(std::make_tuple(range->GetEnd(), false, node));
+ range = range->GetNext();
+ }
+ }
+ }
+ }
+
+ // Sort the endpoints.
+ std::sort(range_endpoints.begin(), range_endpoints.end());
+
+ // Nodes live at the current position in the linear sweep.
+ ArenaSet<InterferenceNode*, decltype(&InterferenceNode::CmpPtr)> live(
+ InterferenceNode::CmpPtr, coloring_attempt_allocator_->Adapter(kArenaAllocRegisterAllocator));
+
+ // Linear sweep. When we encounter the beginning of a range, we add the corresponding node to the
+ // live set. When we encounter the end of a range, we remove the corresponding node
+ // from the live set. Nodes interfere if they are in the live set at the same time.
+ for (auto it = range_endpoints.begin(); it != range_endpoints.end(); ++it) {
+ bool is_range_beginning;
+ InterferenceNode* node;
+ // Extract information from the tuple, including the node this tuple represents.
+ std::tie(std::ignore, is_range_beginning, node) = *it;
+
+ if (is_range_beginning) {
+ for (InterferenceNode* conflicting : live) {
+ DCHECK_NE(node, conflicting);
+ AddPotentialInterference(node, conflicting);
+ AddPotentialInterference(conflicting, node);
+ }
+ DCHECK_EQ(live.count(node), 0u);
+ live.insert(node);
+ } else {
+ // End of range.
+ DCHECK_EQ(live.count(node), 1u);
+ live.erase(node);
+ }
+ }
+ DCHECK(live.empty());
+}
+
+// The order in which we color nodes is vital to both correctness (forward
+// progress) and code quality. Specifically, we must prioritize intervals
+// that require registers, and after that we must prioritize short intervals.
+// That way, if we fail to color a node, it either won't require a register,
+// or it will be a long interval that can be split in order to make the
+// interference graph sparser.
+// TODO: May also want to consider:
+// - Loop depth
+// - Constants (since they can be rematerialized)
+// - Allocated spill slots
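+//
+// For example, an interval that requires a register is prioritized over any interval that
+// does not, regardless of length; among two intervals that both require registers, the
+// shorter one has higher priority.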
+static bool GreaterNodePriority(const InterferenceNode* lhs,
+ const InterferenceNode* rhs) {
+ LiveInterval* lhs_interval = lhs->GetInterval();
+ LiveInterval* rhs_interval = rhs->GetInterval();
+
+ // (1) Choose the interval that requires a register.
+ if (lhs_interval->RequiresRegister() != rhs_interval->RequiresRegister()) {
+ return lhs_interval->RequiresRegister();
+ }
+
+ // (2) Choose the interval that has a shorter life span.
+ if (lhs_interval->GetLength() != rhs_interval->GetLength()) {
+ return lhs_interval->GetLength() < rhs_interval->GetLength();
+ }
+
+ // (3) Just choose the interval based on a deterministic ordering.
+ return InterferenceNode::CmpPtr(lhs, rhs);
+}
+
+void RegisterAllocatorGraphColor::PruneInterferenceGraph(
+ const ArenaVector<InterferenceNode*>& prunable_nodes,
+ size_t num_regs,
+ ArenaStdStack<InterferenceNode*>* pruned_nodes) {
+ // When pruning the graph, we refer to nodes with degree less than num_regs as low degree nodes,
+ // and all others as high degree nodes. The distinction is important: low degree nodes are
+ // guaranteed a color, while high degree nodes are not.
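+  // For example, with num_regs = 4, a node of out-degree 3 can always be colored, since
+  // its neighbors can block at most three of the four available colors.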
+
+ // Low-degree nodes are guaranteed a color, so worklist order does not matter.
+ ArenaDeque<InterferenceNode*> low_degree_worklist(
+ coloring_attempt_allocator_->Adapter(kArenaAllocRegisterAllocator));
+
+ // If we have to prune from the high-degree worklist, we cannot guarantee
+ // the pruned node a color. So, we order the worklist by priority.
+ ArenaSet<InterferenceNode*, decltype(&GreaterNodePriority)> high_degree_worklist(
+ GreaterNodePriority, coloring_attempt_allocator_->Adapter(kArenaAllocRegisterAllocator));
+
+ // Build worklists.
+ for (InterferenceNode* node : prunable_nodes) {
+ DCHECK(!node->GetInterval()->HasRegister())
+ << "Fixed nodes should never be pruned";
+ DCHECK(!node->GetInterval()->IsSlowPathSafepoint())
+ << "Safepoint nodes should never be pruned";
+ if (node->GetOutDegree() < num_regs) {
+ low_degree_worklist.push_back(node);
+ } else {
+ high_degree_worklist.insert(node);
+ }
+ }
+
+ // Helper function to prune an interval from the interference graph,
+ // which includes updating the worklists.
+ auto prune_node = [this,
+ num_regs,
+ &pruned_nodes,
+ &low_degree_worklist,
+ &high_degree_worklist] (InterferenceNode* node) {
+ DCHECK(!node->GetInterval()->HasRegister());
+ pruned_nodes->push(node);
+ for (InterferenceNode* adjacent : node->GetAdjacentNodes()) {
+ DCHECK(!adjacent->GetInterval()->IsSlowPathSafepoint())
+ << "Nodes should never interfere with synthesized safepoint nodes";
+ if (adjacent->GetInterval()->HasRegister()) {
+ // No effect on pre-colored nodes; they're never pruned.
+ } else {
+ bool was_high_degree = adjacent->GetOutDegree() >= num_regs;
+ DCHECK(adjacent->ContainsInterference(node))
+ << "Missing incoming interference edge from non-fixed node";
+ adjacent->RemoveInterference(node);
+ if (was_high_degree && adjacent->GetOutDegree() < num_regs) {
+ // This is a transition from high degree to low degree.
+ DCHECK_EQ(high_degree_worklist.count(adjacent), 1u);
+ high_degree_worklist.erase(adjacent);
+ low_degree_worklist.push_back(adjacent);
+ }
+ }
+ }
+ };
+
+ // Prune graph.
+ while (!low_degree_worklist.empty() || !high_degree_worklist.empty()) {
+ while (!low_degree_worklist.empty()) {
+ InterferenceNode* node = low_degree_worklist.front();
+ // TODO: pop_back() should work as well, but it doesn't; we get a
+ // failed check while pruning. We should look into this.
+ low_degree_worklist.pop_front();
+ prune_node(node);
+ }
+ if (!high_degree_worklist.empty()) {
+ // We prune the lowest-priority node, because pruning a node earlier
+ // gives it a higher chance of being spilled.
+ InterferenceNode* node = *high_degree_worklist.rbegin();
+ high_degree_worklist.erase(node);
+ prune_node(node);
+ }
+ }
+}
+
+// Build a mask with a bit set for each register assigned to some
+// interval in `intervals`.
+template <typename Container>
+static std::bitset<kMaxNumRegs> BuildConflictMask(Container& intervals) {
+ std::bitset<kMaxNumRegs> conflict_mask;
+ for (InterferenceNode* adjacent : intervals) {
+ LiveInterval* conflicting = adjacent->GetInterval();
+ if (conflicting->HasRegister()) {
+ conflict_mask.set(conflicting->GetRegister());
+ if (conflicting->HasHighInterval()) {
+ DCHECK(conflicting->GetHighInterval()->HasRegister());
+ conflict_mask.set(conflicting->GetHighInterval()->GetRegister());
+ }
+ } else {
+ DCHECK(!conflicting->HasHighInterval()
+ || !conflicting->GetHighInterval()->HasRegister());
+ }
+ }
+ return conflict_mask;
+}
+
+bool RegisterAllocatorGraphColor::ColorInterferenceGraph(
+ ArenaStdStack<InterferenceNode*>* pruned_nodes,
+ size_t num_regs) {
+ DCHECK_LE(num_regs, kMaxNumRegs) << "kMaxNumRegs is too small";
+ ArenaVector<LiveInterval*> colored_intervals(
+ coloring_attempt_allocator_->Adapter(kArenaAllocRegisterAllocator));
+ bool successful = true;
+
+ while (!pruned_nodes->empty()) {
+ InterferenceNode* node = pruned_nodes->top();
+ pruned_nodes->pop();
+ LiveInterval* interval = node->GetInterval();
+
+ // Search for free register(s).
+ // Note that the graph coloring allocator assumes that pair intervals are aligned here,
+ // excluding pre-colored pair intervals (which can currently be unaligned on x86).
+ std::bitset<kMaxNumRegs> conflict_mask = BuildConflictMask(node->GetAdjacentNodes());
+ size_t reg = 0;
+ if (interval->HasHighInterval()) {
+ while (reg < num_regs - 1 && (conflict_mask[reg] || conflict_mask[reg + 1])) {
+ reg += 2;
+ }
+ } else {
+ // We use CTZ (count trailing zeros) to quickly find the lowest available register.
+ // Note that CTZ is undefined for 0, so we special-case it.
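+      // For example, conflict_mask = 0b0111 blocks registers 0-2; ~conflict_mask has its
+      // lowest set bit at position 3, so CTZ returns 3, the lowest free register.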
+ reg = conflict_mask.all() ? conflict_mask.size() : CTZ(~conflict_mask.to_ulong());
+ }
+
+ if (reg < (interval->HasHighInterval() ? num_regs - 1 : num_regs)) {
+ // Assign register.
+ DCHECK(!interval->HasRegister());
+ interval->SetRegister(reg);
+ colored_intervals.push_back(interval);
+ if (interval->HasHighInterval()) {
+ DCHECK(!interval->GetHighInterval()->HasRegister());
+ interval->GetHighInterval()->SetRegister(reg + 1);
+ colored_intervals.push_back(interval->GetHighInterval());
+ }
+ } else if (interval->RequiresRegister()) {
+ // The interference graph is too dense to color. Make it sparser by
+ // splitting this live interval.
+ successful = false;
+ SplitAtRegisterUses(interval);
+ // We continue coloring, because there may be additional intervals that cannot
+ // be colored, and that we should split.
+ } else {
+ // Spill.
+ AllocateSpillSlotFor(interval);
+ }
+ }
+
+ // If unsuccessful, reset all register assignments.
+ if (!successful) {
+ for (LiveInterval* interval : colored_intervals) {
+ interval->ClearRegister();
+ }
+ }
+
+ return successful;
+}
+
+size_t RegisterAllocatorGraphColor::ComputeMaxSafepointLiveRegisters(
+ const ArenaVector<InterferenceNode*>& safepoints) {
+ size_t max_safepoint_live_regs = 0;
+ for (InterferenceNode* safepoint : safepoints) {
+ DCHECK(safepoint->GetInterval()->IsSlowPathSafepoint());
+ std::bitset<kMaxNumRegs> conflict_mask = BuildConflictMask(safepoint->GetAdjacentNodes());
+ size_t live_regs = conflict_mask.count();
+ max_safepoint_live_regs = std::max(max_safepoint_live_regs, live_regs);
+ }
+ return max_safepoint_live_regs;
+}
+
+void RegisterAllocatorGraphColor::AllocateSpillSlotFor(LiveInterval* interval) {
+ LiveInterval* parent = interval->GetParent();
+ HInstruction* defined_by = parent->GetDefinedBy();
+ if (parent->HasSpillSlot()) {
+ // We already have a spill slot for this value that we can reuse.
+ } else if (defined_by->IsParameterValue()) {
+ // Parameters already have a stack slot.
+ parent->SetSpillSlot(codegen_->GetStackSlotOfParameter(defined_by->AsParameterValue()));
+ } else if (defined_by->IsCurrentMethod()) {
+ // The current method is always at spill slot 0.
+ parent->SetSpillSlot(0);
+ } else if (defined_by->IsConstant()) {
+ // Constants don't need a spill slot.
+ } else {
+ // Allocate a spill slot based on type.
+ size_t* spill_slot_counter;
+ switch (interval->GetType()) {
+ case Primitive::kPrimDouble:
+ spill_slot_counter = &double_spill_slot_counter_;
+ break;
+ case Primitive::kPrimLong:
+ spill_slot_counter = &long_spill_slot_counter_;
+ break;
+ case Primitive::kPrimFloat:
+ spill_slot_counter = &float_spill_slot_counter_;
+ break;
+ case Primitive::kPrimNot:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimShort:
+ spill_slot_counter = &int_spill_slot_counter_;
+ break;
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unexpected type for interval " << interval->GetType();
+ UNREACHABLE();
+ }
+
+ parent->SetSpillSlot(*spill_slot_counter);
+ *spill_slot_counter += parent->NeedsTwoSpillSlots() ? 2 : 1;
+ // TODO: Could color stack slots if we wanted to, even if
+ // it's just a trivial coloring. See the linear scan implementation,
+ // which simply reuses spill slots for values whose live intervals
+ // have already ended.
+ }
+}
+
+} // namespace art
diff --git a/compiler/optimizing/register_allocator_graph_color.h b/compiler/optimizing/register_allocator_graph_color.h
new file mode 100644
index 0000000..0b5af96
--- /dev/null
+++ b/compiler/optimizing/register_allocator_graph_color.h
@@ -0,0 +1,197 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATOR_GRAPH_COLOR_H_
+#define ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATOR_GRAPH_COLOR_H_
+
+#include "arch/instruction_set.h"
+#include "base/arena_containers.h"
+#include "base/arena_object.h"
+#include "base/macros.h"
+#include "primitive.h"
+#include "register_allocator.h"
+
+namespace art {
+
+class CodeGenerator;
+class HBasicBlock;
+class HGraph;
+class HInstruction;
+class HParallelMove;
+class Location;
+class SsaLivenessAnalysis;
+class InterferenceNode;
+
+/**
+ * A graph coloring register allocator.
+ *
+ * The algorithm proceeds as follows:
+ * (1) Build an interference graph, where nodes represent live intervals, and edges represent
+ * interferences between two intervals. Coloring this graph with k colors is isomorphic to
+ * finding a valid register assignment with k registers.
+ * (2) To color the graph, first prune all nodes with degree less than k, since these nodes are
+ * guaranteed a color. (No matter how we color their adjacent nodes, we can give them a
+ * different color.) As we prune nodes from the graph, more nodes may drop below degree k,
+ * enabling further pruning. The key is to maintain the pruning order in a stack, so that we
+ * can color the nodes in the reverse order.
+ * When there are no more nodes with degree less than k, we start pruning alternate nodes based
+ * on heuristics. Since these nodes are not guaranteed a color, we are careful to
+ * prioritize nodes that require a register. We also prioritize short intervals, because
+ * short intervals cannot be split very much if coloring fails (see below). "Prioritizing"
+ * a node amounts to pruning it later, since it will have fewer interferences if we prune other
+ * nodes first.
+ * (3) We color nodes in the reverse order in which we pruned them. If we cannot assign
+ * a node a color, we do one of two things:
+ * - If the node requires a register, we consider the current coloring attempt a failure.
+ * However, we split the node's live interval in order to make the interference graph
+ * sparser, so that future coloring attempts may succeed.
+ * - If the node does not require a register, we simply assign it a location on the stack.
+ *
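+ * As a small example, consider k = 2 and intervals a, b, c, where a interferes with b and
+ * b interferes with c. Nodes a and c have degree 1 < k, so they are pruned first, then b.
+ * Coloring in reverse order, b receives register 0; a and c, each adjacent only to b,
+ * receive register 1.
+ *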
+ * A good reference for graph coloring register allocation is
+ * "Modern Compiler Implementation in Java" (Andrew W. Appel, 2nd Edition).
+ */
+class RegisterAllocatorGraphColor : public RegisterAllocator {
+ public:
+ RegisterAllocatorGraphColor(ArenaAllocator* allocator,
+ CodeGenerator* codegen,
+ const SsaLivenessAnalysis& analysis);
+ ~RegisterAllocatorGraphColor() OVERRIDE {}
+
+ void AllocateRegisters() OVERRIDE;
+
+ bool Validate(bool log_fatal_on_failure);
+
+ private:
+ // Collect all intervals and prepare for register allocation.
+ void ProcessInstructions();
+ void ProcessInstruction(HInstruction* instruction);
+
+ // If any inputs require specific registers, block those registers
+ // at the position of this instruction.
+ void CheckForFixedInputs(HInstruction* instruction);
+
+ // If the output of an instruction requires a specific register, split
+ // the interval and assign the register to the first part.
+ void CheckForFixedOutput(HInstruction* instruction);
+
+ // Add all applicable safepoints to a live interval.
+ // Currently depends on instruction processing order.
+ void AddSafepointsFor(HInstruction* instruction);
+
+ // Collect all live intervals associated with the temporary locations
+ // needed by an instruction.
+ void CheckForTempLiveIntervals(HInstruction* instruction);
+
+ // If a safe point is needed, add a synthesized interval to later record
+ // the number of live registers at this point.
+ void CheckForSafepoint(HInstruction* instruction);
+
+ // Split an interval, but only if `position` is inside of `interval`.
+ // Return either the new interval, or the original interval if not split.
+ static LiveInterval* TrySplit(LiveInterval* interval, size_t position);
+
+ // To ensure every graph can be colored, split live intervals
+ // at their register defs and uses. This creates short intervals with low
+ // degree in the interference graph, which are prioritized during graph
+ // coloring.
+ void SplitAtRegisterUses(LiveInterval* interval);
+
+ // If the given instruction is a catch phi, give it a spill slot.
+ void AllocateSpillSlotForCatchPhi(HInstruction* instruction);
+
+ // Ensure that the given register cannot be allocated for a given range.
+ void BlockRegister(Location location, size_t start, size_t end);
+ void BlockRegisters(size_t start, size_t end, bool caller_save_only = false);
+
+ // Use the intervals collected from instructions to construct an
+ // interference graph mapping intervals to adjacency lists.
+ // Also, collect synthesized safepoint nodes, used to keep
+ // track of live intervals across safepoints.
+ void BuildInterferenceGraph(const ArenaVector<LiveInterval*>& intervals,
+ ArenaVector<InterferenceNode*>* prunable_nodes,
+ ArenaVector<InterferenceNode*>* safepoints);
+
+ // Prune nodes from the interference graph to be colored later. Build
+ // a stack (pruned_nodes) containing these intervals in an order determined
+ // by various heuristics.
+ void PruneInterferenceGraph(const ArenaVector<InterferenceNode*>& prunable_nodes,
+ size_t num_registers,
+ ArenaStdStack<InterferenceNode*>* pruned_nodes);
+
+ // Process pruned_intervals to color the interference graph, spilling when
+ // necessary. Return true if successful. Else, split some intervals to make
+ // the interference graph sparser.
+ bool ColorInterferenceGraph(ArenaStdStack<InterferenceNode*>* pruned_nodes,
+ size_t num_registers);
+
+ // Return the maximum number of registers live at safepoints,
+ // based on the outgoing interference edges of safepoint nodes.
+ size_t ComputeMaxSafepointLiveRegisters(const ArenaVector<InterferenceNode*>& safepoints);
+
+ // If necessary, add the given interval to the list of spilled intervals,
+ // and make sure it's ready to be spilled to the stack.
+ void AllocateSpillSlotFor(LiveInterval* interval);
+
+ // Live intervals, split by kind (core and floating point).
+ // These should not contain high intervals, as those are represented by
+ // the corresponding low interval throughout register allocation.
+ ArenaVector<LiveInterval*> core_intervals_;
+ ArenaVector<LiveInterval*> fp_intervals_;
+
+ // Intervals for temporaries, saved for special handling in the resolution phase.
+ ArenaVector<LiveInterval*> temp_intervals_;
+
+ // Safepoints, saved for special handling while processing instructions.
+ ArenaVector<HInstruction*> safepoints_;
+
+ // Live intervals for specific registers. These become pre-colored nodes
+ // in the interference graph.
+ ArenaVector<LiveInterval*> physical_core_intervals_;
+ ArenaVector<LiveInterval*> physical_fp_intervals_;
+
+ // Allocated stack slot counters.
+ size_t int_spill_slot_counter_;
+ size_t double_spill_slot_counter_;
+ size_t float_spill_slot_counter_;
+ size_t long_spill_slot_counter_;
+ size_t catch_phi_spill_slot_counter_;
+
+ // Number of stack slots needed for the pointer to the current method.
+ // This is 1 for 32-bit architectures, and 2 for 64-bit architectures.
+ const size_t reserved_art_method_slots_;
+
+ // Number of stack slots needed for outgoing arguments.
+ const size_t reserved_out_slots_;
+
+ // The number of globally blocked core and floating point registers, such as the stack pointer.
+ size_t number_of_globally_blocked_core_regs_;
+ size_t number_of_globally_blocked_fp_regs_;
+
+ // The maximum number of registers live at safe points. Needed by the code generator.
+ size_t max_safepoint_live_core_regs_;
+ size_t max_safepoint_live_fp_regs_;
+
+ // An arena allocator used for a single graph coloring attempt.
+ // Many data structures are cleared between graph coloring attempts, so we reduce
+ // total memory usage by using a new arena allocator for each attempt.
+ ArenaAllocator* coloring_attempt_allocator_;
+
+ DISALLOW_COPY_AND_ASSIGN(RegisterAllocatorGraphColor);
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATOR_GRAPH_COLOR_H_
diff --git a/compiler/optimizing/register_allocator_linear_scan.h b/compiler/optimizing/register_allocator_linear_scan.h
index b6e4f92..1a643a0 100644
--- a/compiler/optimizing/register_allocator_linear_scan.h
+++ b/compiler/optimizing/register_allocator_linear_scan.h
@@ -43,6 +43,7 @@
RegisterAllocatorLinearScan(ArenaAllocator* allocator,
CodeGenerator* codegen,
const SsaLivenessAnalysis& analysis);
+ ~RegisterAllocatorLinearScan() OVERRIDE {}
void AllocateRegisters() OVERRIDE;
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index cbb7b2f..55ea99e 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -31,12 +31,29 @@
namespace art {
+using Strategy = RegisterAllocator::Strategy;
+
// Note: the register allocator tests rely on the fact that constants have live
// intervals and registers get allocated to them.
-class RegisterAllocatorTest : public CommonCompilerTest {};
+class RegisterAllocatorTest : public CommonCompilerTest {
+ protected:
+  // These functions need to access private variables of LocationSummary, so we declare them
+  // as members of RegisterAllocatorTest, which we make a friend class.
+ static void SameAsFirstInputHint(Strategy strategy);
+ static void ExpectedInRegisterHint(Strategy strategy);
+};
-static bool Check(const uint16_t* data) {
+// This macro instantiates one test per register allocation strategy that should be tested.
+#define TEST_ALL_STRATEGIES(test_name)\
+TEST_F(RegisterAllocatorTest, test_name##_LinearScan) {\
+ test_name(Strategy::kRegisterAllocatorLinearScan);\
+}\
+TEST_F(RegisterAllocatorTest, test_name##_GraphColor) {\
+ test_name(Strategy::kRegisterAllocatorGraphColor);\
+}
+
+static bool Check(const uint16_t* data, Strategy strategy) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
HGraph* graph = CreateCFG(&allocator, data);
@@ -45,7 +62,8 @@
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
SsaLivenessAnalysis liveness(graph, &codegen);
liveness.Analyze();
- RegisterAllocator* register_allocator = RegisterAllocator::Create(&allocator, &codegen, liveness);
+ RegisterAllocator* register_allocator =
+ RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
return register_allocator->Validate(false);
}
@@ -143,7 +161,7 @@
}
}
-TEST_F(RegisterAllocatorTest, CFG1) {
+static void CFG1(Strategy strategy) {
/*
* Test the following snippet:
* return 0;
@@ -160,10 +178,12 @@
Instruction::CONST_4 | 0 | 0,
Instruction::RETURN);
- ASSERT_TRUE(Check(data));
+ ASSERT_TRUE(Check(data, strategy));
}
-TEST_F(RegisterAllocatorTest, Loop1) {
+TEST_ALL_STRATEGIES(CFG1);
+
+static void Loop1(Strategy strategy) {
/*
* Test the following snippet:
* int a = 0;
@@ -199,10 +219,12 @@
Instruction::CONST_4 | 5 << 12 | 1 << 8,
Instruction::RETURN | 1 << 8);
- ASSERT_TRUE(Check(data));
+ ASSERT_TRUE(Check(data, strategy));
}
-TEST_F(RegisterAllocatorTest, Loop2) {
+TEST_ALL_STRATEGIES(Loop1);
+
+static void Loop2(Strategy strategy) {
/*
* Test the following snippet:
* int a = 0;
@@ -248,10 +270,12 @@
Instruction::ADD_INT, 1 << 8 | 0,
Instruction::RETURN | 1 << 8);
- ASSERT_TRUE(Check(data));
+ ASSERT_TRUE(Check(data, strategy));
}
-TEST_F(RegisterAllocatorTest, Loop3) {
+TEST_ALL_STRATEGIES(Loop2);
+
+static void Loop3(Strategy strategy) {
/*
* Test the following snippet:
* int a = 0
@@ -296,7 +320,8 @@
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
SsaLivenessAnalysis liveness(graph, &codegen);
liveness.Analyze();
- RegisterAllocator* register_allocator = RegisterAllocator::Create(&allocator, &codegen, liveness);
+ RegisterAllocator* register_allocator =
+ RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_TRUE(register_allocator->Validate(false));
@@ -314,6 +339,8 @@
ASSERT_EQ(phi_interval->GetRegister(), ret->InputAt(0)->GetLiveInterval()->GetRegister());
}
+TEST_ALL_STRATEGIES(Loop3);
+
TEST_F(RegisterAllocatorTest, FirstRegisterUse) {
const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
@@ -354,7 +381,7 @@
ASSERT_EQ(new_interval->FirstRegisterUse(), last_xor->GetLifetimePosition());
}
-TEST_F(RegisterAllocatorTest, DeadPhi) {
+static void DeadPhi(Strategy strategy) {
/* Test for a dead loop phi taking as back-edge input a phi that also has
* this loop phi as input. Walking backwards in SsaDeadPhiElimination
* does not solve the problem because the loop phi will be visited last.
@@ -385,15 +412,19 @@
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
SsaLivenessAnalysis liveness(graph, &codegen);
liveness.Analyze();
- RegisterAllocator* register_allocator = RegisterAllocator::Create(&allocator, &codegen, liveness);
+ RegisterAllocator* register_allocator =
+ RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_TRUE(register_allocator->Validate(false));
}
+TEST_ALL_STRATEGIES(DeadPhi);
+
/**
* Test that the TryAllocateFreeReg method works in the presence of inactive intervals
* that share the same register. It should split the interval it is currently
* allocating for at the minimum lifetime position between the two inactive intervals.
+ * This test only applies to the linear scan allocator.
*/
TEST_F(RegisterAllocatorTest, FreeUntil) {
const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
@@ -507,15 +538,15 @@
graph->GetDexFile(),
dex_cache,
0);
-*input2 = new (allocator) HInstanceFieldGet(parameter,
- Primitive::kPrimInt,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- dex_cache,
- 0);
+ *input2 = new (allocator) HInstanceFieldGet(parameter,
+ Primitive::kPrimInt,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ dex_cache,
+ 0);
then->AddInstruction(*input1);
else_->AddInstruction(*input2);
join->AddInstruction(new (allocator) HExit());
@@ -527,7 +558,7 @@
return graph;
}
-TEST_F(RegisterAllocatorTest, PhiHint) {
+static void PhiHint(Strategy strategy) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
HPhi *phi;
@@ -543,7 +574,7 @@
// Check that the register allocator is deterministic.
RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness);
+ RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_EQ(input1->GetLiveInterval()->GetRegister(), 0);
@@ -563,7 +594,7 @@
// the same register.
phi->GetLocations()->UpdateOut(Location::RegisterLocation(2));
RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness);
+ RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_EQ(input1->GetLiveInterval()->GetRegister(), 2);
@@ -583,7 +614,7 @@
// the same register.
input1->GetLocations()->UpdateOut(Location::RegisterLocation(2));
RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness);
+ RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_EQ(input1->GetLiveInterval()->GetRegister(), 2);
@@ -603,7 +634,7 @@
// the same register.
input2->GetLocations()->UpdateOut(Location::RegisterLocation(2));
RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness);
+ RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_EQ(input1->GetLiveInterval()->GetRegister(), 2);
@@ -612,6 +643,12 @@
}
}
+// TODO: Enable this test for graph coloring register allocation when iterative move
+// coalescing is merged.
+TEST_F(RegisterAllocatorTest, PhiHint_LinearScan) {
+ PhiHint(Strategy::kRegisterAllocatorLinearScan);
+}
+
static HGraph* BuildFieldReturn(ArenaAllocator* allocator,
HInstruction** field,
HInstruction** ret) {
@@ -650,7 +687,7 @@
return graph;
}
-TEST_F(RegisterAllocatorTest, ExpectedInRegisterHint) {
+void RegisterAllocatorTest::ExpectedInRegisterHint(Strategy strategy) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
HInstruction *field, *ret;
@@ -664,7 +701,7 @@
liveness.Analyze();
RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness);
+ RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
// Sanity check that in normal conditions, the register should be hinted to 0 (EAX).
@@ -684,13 +721,19 @@
ret->GetLocations()->inputs_[0] = Location::RegisterLocation(2);
RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness);
+ RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_EQ(field->GetLiveInterval()->GetRegister(), 2);
}
}
+// TODO: Enable this test for graph coloring register allocation when iterative move
+// coalescing is merged.
+TEST_F(RegisterAllocatorTest, ExpectedInRegisterHint_LinearScan) {
+ ExpectedInRegisterHint(Strategy::kRegisterAllocatorLinearScan);
+}
+
static HGraph* BuildTwoSubs(ArenaAllocator* allocator,
HInstruction** first_sub,
HInstruction** second_sub) {
@@ -720,7 +763,7 @@
return graph;
}
-TEST_F(RegisterAllocatorTest, SameAsFirstInputHint) {
+void RegisterAllocatorTest::SameAsFirstInputHint(Strategy strategy) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
HInstruction *first_sub, *second_sub;
@@ -734,7 +777,7 @@
liveness.Analyze();
RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness);
+ RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
// Sanity check that in normal conditions, the registers are the same.
@@ -757,7 +800,7 @@
ASSERT_EQ(second_sub->GetLocations()->Out().GetPolicy(), Location::kSameAsFirstInput);
RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness);
+ RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_EQ(first_sub->GetLiveInterval()->GetRegister(), 2);
@@ -765,6 +808,12 @@
}
}
+// TODO: Enable this test for graph coloring register allocation when iterative move
+// coalescing is merged.
+TEST_F(RegisterAllocatorTest, SameAsFirstInputHint_LinearScan) {
+ SameAsFirstInputHint(Strategy::kRegisterAllocatorLinearScan);
+}
+
static HGraph* BuildDiv(ArenaAllocator* allocator,
HInstruction** div) {
HGraph* graph = CreateGraph(allocator);
@@ -791,7 +840,7 @@
return graph;
}
-TEST_F(RegisterAllocatorTest, ExpectedExactInRegisterAndSameOutputHint) {
+void RegisterAllocatorTest::ExpectedExactInRegisterAndSameOutputHint(Strategy strategy) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
HInstruction *div;
@@ -805,7 +854,7 @@
liveness.Analyze();
RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness);
+ RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
// div on x86 requires its first input in eax and its output to be the same as the first input.
@@ -813,9 +862,16 @@
}
}
+// TODO: Enable this test for graph coloring register allocation when iterative move
+// coalescing is merged.
+TEST_F(RegisterAllocatorTest, ExpectedExactInRegisterAndSameOutputHint_LinearScan) {
+ ExpectedExactInRegisterAndSameOutputHint(Strategy::kRegisterAllocatorLinearScan);
+}
+
// Test a bug in the register allocator, where allocating a blocked
// register would lead to spilling an inactive interval at the wrong
// position.
+// This test only applies to the linear scan allocator.
TEST_F(RegisterAllocatorTest, SpillInactive) {
ArenaPool pool;
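The hint tests above are parameterized on the allocation strategy so that, once iterative move coalescing lands, each linear-scan wrapper can gain a graph-coloring twin. A hypothetical sketch of such a twin, assuming the Strategy enum names the new allocator kRegisterAllocatorGraphColor (as the register_allocator_graph_color.cc build entry suggests):

    // Hypothetical follow-up test, not part of this change: mirrors
    // PhiHint_LinearScan once the graph-coloring allocator honors phi hints.
    TEST_F(RegisterAllocatorTest, PhiHint_GraphColor) {
      PhiHint(Strategy::kRegisterAllocatorGraphColor);
    }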
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index 7af4302..a01e107 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -368,6 +368,27 @@
return live_in->UnionIfNotIn(live_out, kill);
}
+void LiveInterval::DumpWithContext(std::ostream& stream,
+ const CodeGenerator& codegen) const {
+ Dump(stream);
+ if (IsFixed()) {
+ stream << ", register:" << GetRegister() << "(";
+ if (IsFloatingPoint()) {
+ codegen.DumpFloatingPointRegister(stream, GetRegister());
+ } else {
+ codegen.DumpCoreRegister(stream, GetRegister());
+ }
+ stream << ")";
+ } else {
+ stream << ", spill slot:" << GetSpillSlot();
+ }
+ stream << ", requires_register:" << (GetDefinedBy() != nullptr && RequiresRegister());
+ if (GetParent()->GetDefinedBy() != nullptr) {
+ stream << ", defined_by:" << GetParent()->GetDefinedBy()->GetKind();
+ stream << "(" << GetParent()->GetDefinedBy()->GetLifetimePosition() << ")";
+ }
+}
+
static int RegisterOrLowRegister(Location location) {
return location.IsPair() ? location.low() : location.reg();
}
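For reference, DumpWithContext extends the plain Dump output with the assigned register (or spill slot) and the defining instruction, producing lines roughly of this shape (illustrative only; the prefix comes from Dump and the register name from the codegen):

    [38,42) ..., register:2(edx), requires_register:1, defined_by:Add(38)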
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index dc98864..346753b 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -150,9 +150,7 @@
if (GetIsEnvironment()) return false;
if (IsSynthesized()) return false;
Location location = GetUser()->GetLocations()->InAt(GetInputIndex());
- return location.IsUnallocated()
- && (location.GetPolicy() == Location::kRequiresRegister
- || location.GetPolicy() == Location::kRequiresFpuRegister);
+ return location.IsUnallocated() && location.RequiresRegisterKind();
}
private:
@@ -481,6 +479,10 @@
return last_range_->GetEnd();
}
+ size_t GetLength() const {
+ return GetEnd() - GetStart();
+ }
+
size_t FirstRegisterUseAfter(size_t position) const {
if (is_temp_) {
return position == GetStart() ? position : kNoLifetime;
@@ -504,10 +506,16 @@
return kNoLifetime;
}
+ // Returns the location of the first register use for this live interval,
+ // including a register definition if applicable.
size_t FirstRegisterUse() const {
return FirstRegisterUseAfter(GetStart());
}
+ // Whether the interval requires a register rather than a stack location.
+ // If needed for performance, this could be cached.
+ bool RequiresRegister() const { return FirstRegisterUse() != kNoLifetime; }
+
size_t FirstUseAfter(size_t position) const {
if (is_temp_) {
return position == GetStart() ? position : kNoLifetime;
@@ -693,6 +701,10 @@
stream << " is_high: " << IsHighInterval();
}
+ // Same as Dump, but adds context such as the instruction defining this interval, and
+ // the register currently assigned to this interval.
+ void DumpWithContext(std::ostream& stream, const CodeGenerator& codegen) const;
+
LiveInterval* GetNextSibling() const { return next_sibling_; }
LiveInterval* GetLastSibling() {
LiveInterval* result = this;
@@ -871,6 +883,33 @@
range_search_start_ = first_range_;
}
+ bool DefinitionRequiresRegister() const {
+ DCHECK(IsParent());
+ LocationSummary* locations = defined_by_->GetLocations();
+ Location location = locations->Out();
+ // This interval is the first interval of the instruction. If the output
+ // of the instruction requires a register, the interval requires a register
+ // from its defining position onward.
+ if (location.IsUnallocated()) {
+ if ((location.GetPolicy() == Location::kRequiresRegister)
+ || (location.GetPolicy() == Location::kSameAsFirstInput
+ && (locations->InAt(0).IsRegister()
+ || locations->InAt(0).IsRegisterPair()
+ || locations->InAt(0).GetPolicy() == Location::kRequiresRegister))) {
+ return true;
+ } else if ((location.GetPolicy() == Location::kRequiresFpuRegister)
+ || (location.GetPolicy() == Location::kSameAsFirstInput
+ && (locations->InAt(0).IsFpuRegister()
+ || locations->InAt(0).IsFpuRegisterPair()
+ || locations->InAt(0).GetPolicy() == Location::kRequiresFpuRegister))) {
+ return true;
+ }
+ } else if (location.IsRegister() || location.IsRegisterPair()) {
+ return true;
+ }
+ return false;
+ }
+
private:
LiveInterval(ArenaAllocator* allocator,
Primitive::Type type,
@@ -925,33 +964,6 @@
return range;
}
- bool DefinitionRequiresRegister() const {
- DCHECK(IsParent());
- LocationSummary* locations = defined_by_->GetLocations();
- Location location = locations->Out();
- // This interval is the first interval of the instruction. If the output
- // of the instruction requires a register, we return the position of that instruction
- // as the first register use.
- if (location.IsUnallocated()) {
- if ((location.GetPolicy() == Location::kRequiresRegister)
- || (location.GetPolicy() == Location::kSameAsFirstInput
- && (locations->InAt(0).IsRegister()
- || locations->InAt(0).IsRegisterPair()
- || locations->InAt(0).GetPolicy() == Location::kRequiresRegister))) {
- return true;
- } else if ((location.GetPolicy() == Location::kRequiresFpuRegister)
- || (location.GetPolicy() == Location::kSameAsFirstInput
- && (locations->InAt(0).IsFpuRegister()
- || locations->InAt(0).IsFpuRegisterPair()
- || locations->InAt(0).GetPolicy() == Location::kRequiresFpuRegister))) {
- return true;
- }
- } else if (location.IsRegister() || location.IsRegisterPair()) {
- return true;
- }
- return false;
- }
-
bool IsDefiningPosition(size_t position) const {
return IsParent() && (position == GetStart());
}
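GetLength() and RequiresRegister() give the graph-coloring allocator cheap inputs for spill heuristics. A minimal sketch of how such a heuristic might combine them; SpillWeight is a hypothetical helper for illustration, not code from this change:

    // Shorter intervals that need a register are the most profitable to keep
    // in a register; intervals that never need one can stay on the stack.
    static float SpillWeight(const LiveInterval* interval) {
      if (!interval->RequiresRegister()) {
        return 0.0f;  // Spilling costs nothing.
      }
      return 1.0f / static_cast<float>(interval->GetLength());
    }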
diff --git a/compiler/optimizing/x86_memory_gen.cc b/compiler/optimizing/x86_memory_gen.cc
index 195159f..8aa315a 100644
--- a/compiler/optimizing/x86_memory_gen.cc
+++ b/compiler/optimizing/x86_memory_gen.cc
@@ -69,8 +69,8 @@
};
X86MemoryOperandGeneration::X86MemoryOperandGeneration(HGraph* graph,
- OptimizingCompilerStats* stats,
- CodeGenerator* codegen)
+ CodeGenerator* codegen,
+ OptimizingCompilerStats* stats)
: HOptimization(graph, kX86MemoryOperandGenerationPassName, stats),
do_implicit_null_checks_(codegen->GetCompilerOptions().GetImplicitNullChecks()) {
}
diff --git a/compiler/optimizing/x86_memory_gen.h b/compiler/optimizing/x86_memory_gen.h
index 7e88681..5f15d9f 100644
--- a/compiler/optimizing/x86_memory_gen.h
+++ b/compiler/optimizing/x86_memory_gen.h
@@ -28,8 +28,8 @@
class X86MemoryOperandGeneration : public HOptimization {
public:
X86MemoryOperandGeneration(HGraph* graph,
- OptimizingCompilerStats* stats,
- CodeGenerator* codegen);
+ CodeGenerator* codegen,
+ OptimizingCompilerStats* stats);
void Run() OVERRIDE;
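The parameter swap brings this pass in line with other codegen-aware optimizations (codegen before stats), so call sites presumably read as follows; arena, graph, codegen, and stats stand in for whatever the optimization setup already has in scope:

    // Hypothetical call site after the reorder.
    X86MemoryOperandGeneration* memory_gen =
        new (arena) X86MemoryOperandGeneration(graph, codegen, stats);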
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
index 1796b39..d5cd59d 100644
--- a/compiler/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -376,508 +376,6 @@
}
}
-static dwarf::Reg DWARFReg(Register reg) {
- return dwarf::Reg::ArmCore(static_cast<int>(reg));
-}
-
-static dwarf::Reg DWARFReg(SRegister reg) {
- return dwarf::Reg::ArmFp(static_cast<int>(reg));
-}
-
-constexpr size_t kFramePointerSize = static_cast<size_t>(kArmPointerSize);
-
-void ArmAssembler::BuildFrame(size_t frame_size,
- ManagedRegister method_reg,
- ArrayRef<const ManagedRegister> callee_save_regs,
- const ManagedRegisterEntrySpills& entry_spills) {
- CHECK_EQ(buffer_.Size(), 0U); // Nothing emitted yet
- CHECK_ALIGNED(frame_size, kStackAlignment);
- CHECK_EQ(R0, method_reg.AsArm().AsCoreRegister());
-
- // Push callee saves and link register.
- RegList core_spill_mask = 1 << LR;
- uint32_t fp_spill_mask = 0;
- for (const ManagedRegister& reg : callee_save_regs) {
- if (reg.AsArm().IsCoreRegister()) {
- core_spill_mask |= 1 << reg.AsArm().AsCoreRegister();
- } else {
- fp_spill_mask |= 1 << reg.AsArm().AsSRegister();
- }
- }
- PushList(core_spill_mask);
- cfi_.AdjustCFAOffset(POPCOUNT(core_spill_mask) * kFramePointerSize);
- cfi_.RelOffsetForMany(DWARFReg(Register(0)), 0, core_spill_mask, kFramePointerSize);
- if (fp_spill_mask != 0) {
- vpushs(SRegister(CTZ(fp_spill_mask)), POPCOUNT(fp_spill_mask));
- cfi_.AdjustCFAOffset(POPCOUNT(fp_spill_mask) * kFramePointerSize);
- cfi_.RelOffsetForMany(DWARFReg(SRegister(0)), 0, fp_spill_mask, kFramePointerSize);
- }
-
- // Increase frame to required size.
- int pushed_values = POPCOUNT(core_spill_mask) + POPCOUNT(fp_spill_mask);
- CHECK_GT(frame_size, pushed_values * kFramePointerSize); // Must at least have space for Method*.
- IncreaseFrameSize(frame_size - pushed_values * kFramePointerSize); // handles CFI as well.
-
- // Write out Method*.
- StoreToOffset(kStoreWord, R0, SP, 0);
-
- // Write out entry spills.
- int32_t offset = frame_size + kFramePointerSize;
- for (size_t i = 0; i < entry_spills.size(); ++i) {
- ArmManagedRegister reg = entry_spills.at(i).AsArm();
- if (reg.IsNoRegister()) {
- // only increment stack offset.
- ManagedRegisterSpill spill = entry_spills.at(i);
- offset += spill.getSize();
- } else if (reg.IsCoreRegister()) {
- StoreToOffset(kStoreWord, reg.AsCoreRegister(), SP, offset);
- offset += 4;
- } else if (reg.IsSRegister()) {
- StoreSToOffset(reg.AsSRegister(), SP, offset);
- offset += 4;
- } else if (reg.IsDRegister()) {
- StoreDToOffset(reg.AsDRegister(), SP, offset);
- offset += 8;
- }
- }
-}
-
-void ArmAssembler::RemoveFrame(size_t frame_size,
- ArrayRef<const ManagedRegister> callee_save_regs) {
- CHECK_ALIGNED(frame_size, kStackAlignment);
- cfi_.RememberState();
-
- // Compute callee saves to pop and PC.
- RegList core_spill_mask = 1 << PC;
- uint32_t fp_spill_mask = 0;
- for (const ManagedRegister& reg : callee_save_regs) {
- if (reg.AsArm().IsCoreRegister()) {
- core_spill_mask |= 1 << reg.AsArm().AsCoreRegister();
- } else {
- fp_spill_mask |= 1 << reg.AsArm().AsSRegister();
- }
- }
-
- // Decrease frame to start of callee saves.
- int pop_values = POPCOUNT(core_spill_mask) + POPCOUNT(fp_spill_mask);
- CHECK_GT(frame_size, pop_values * kFramePointerSize);
- DecreaseFrameSize(frame_size - (pop_values * kFramePointerSize)); // handles CFI as well.
-
- if (fp_spill_mask != 0) {
- vpops(SRegister(CTZ(fp_spill_mask)), POPCOUNT(fp_spill_mask));
- cfi_.AdjustCFAOffset(-kFramePointerSize * POPCOUNT(fp_spill_mask));
- cfi_.RestoreMany(DWARFReg(SRegister(0)), fp_spill_mask);
- }
-
- // Pop callee saves and PC.
- PopList(core_spill_mask);
-
- // The CFI should be restored for any code that follows the exit block.
- cfi_.RestoreState();
- cfi_.DefCFAOffset(frame_size);
-}
-
-void ArmAssembler::IncreaseFrameSize(size_t adjust) {
- AddConstant(SP, -adjust);
- cfi_.AdjustCFAOffset(adjust);
-}
-
-void ArmAssembler::DecreaseFrameSize(size_t adjust) {
- AddConstant(SP, adjust);
- cfi_.AdjustCFAOffset(-adjust);
-}
-
-void ArmAssembler::Store(FrameOffset dest, ManagedRegister msrc, size_t size) {
- ArmManagedRegister src = msrc.AsArm();
- if (src.IsNoRegister()) {
- CHECK_EQ(0u, size);
- } else if (src.IsCoreRegister()) {
- CHECK_EQ(4u, size);
- StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
- } else if (src.IsRegisterPair()) {
- CHECK_EQ(8u, size);
- StoreToOffset(kStoreWord, src.AsRegisterPairLow(), SP, dest.Int32Value());
- StoreToOffset(kStoreWord, src.AsRegisterPairHigh(),
- SP, dest.Int32Value() + 4);
- } else if (src.IsSRegister()) {
- StoreSToOffset(src.AsSRegister(), SP, dest.Int32Value());
- } else {
- CHECK(src.IsDRegister()) << src;
- StoreDToOffset(src.AsDRegister(), SP, dest.Int32Value());
- }
-}
-
-void ArmAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
- ArmManagedRegister src = msrc.AsArm();
- CHECK(src.IsCoreRegister()) << src;
- StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
-}
-
-void ArmAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
- ArmManagedRegister src = msrc.AsArm();
- CHECK(src.IsCoreRegister()) << src;
- StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
-}
-
-void ArmAssembler::StoreSpanning(FrameOffset dest, ManagedRegister msrc,
- FrameOffset in_off, ManagedRegister mscratch) {
- ArmManagedRegister src = msrc.AsArm();
- ArmManagedRegister scratch = mscratch.AsArm();
- StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
- LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, in_off.Int32Value());
- StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4);
-}
-
-void ArmAssembler::CopyRef(FrameOffset dest, FrameOffset src,
- ManagedRegister mscratch) {
- ArmManagedRegister scratch = mscratch.AsArm();
- LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
- StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
-}
-
-void ArmAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
- bool unpoison_reference) {
- ArmManagedRegister dst = mdest.AsArm();
- CHECK(dst.IsCoreRegister() && dst.IsCoreRegister()) << dst;
- LoadFromOffset(kLoadWord, dst.AsCoreRegister(),
- base.AsArm().AsCoreRegister(), offs.Int32Value());
- if (unpoison_reference) {
- MaybeUnpoisonHeapReference(dst.AsCoreRegister());
- }
-}
-
-void ArmAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
- ArmManagedRegister dst = mdest.AsArm();
- CHECK(dst.IsCoreRegister()) << dst;
- LoadFromOffset(kLoadWord, dst.AsCoreRegister(), SP, src.Int32Value());
-}
-
-void ArmAssembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
- Offset offs) {
- ArmManagedRegister dst = mdest.AsArm();
- CHECK(dst.IsCoreRegister() && dst.IsCoreRegister()) << dst;
- LoadFromOffset(kLoadWord, dst.AsCoreRegister(),
- base.AsArm().AsCoreRegister(), offs.Int32Value());
-}
-
-void ArmAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
- ManagedRegister mscratch) {
- ArmManagedRegister scratch = mscratch.AsArm();
- CHECK(scratch.IsCoreRegister()) << scratch;
- LoadImmediate(scratch.AsCoreRegister(), imm);
- StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
-}
-
-void ArmAssembler::StoreImmediateToThread32(ThreadOffset32 dest,
- uint32_t imm,
- ManagedRegister mscratch) {
- ArmManagedRegister scratch = mscratch.AsArm();
- CHECK(scratch.IsCoreRegister()) << scratch;
- LoadImmediate(scratch.AsCoreRegister(), imm);
- StoreToOffset(kStoreWord, scratch.AsCoreRegister(), TR, dest.Int32Value());
-}
-
-static void EmitLoad(ArmAssembler* assembler, ManagedRegister m_dst,
- Register src_register, int32_t src_offset, size_t size) {
- ArmManagedRegister dst = m_dst.AsArm();
- if (dst.IsNoRegister()) {
- CHECK_EQ(0u, size) << dst;
- } else if (dst.IsCoreRegister()) {
- CHECK_EQ(4u, size) << dst;
- assembler->LoadFromOffset(kLoadWord, dst.AsCoreRegister(), src_register, src_offset);
- } else if (dst.IsRegisterPair()) {
- CHECK_EQ(8u, size) << dst;
- assembler->LoadFromOffset(kLoadWord, dst.AsRegisterPairLow(), src_register, src_offset);
- assembler->LoadFromOffset(kLoadWord, dst.AsRegisterPairHigh(), src_register, src_offset + 4);
- } else if (dst.IsSRegister()) {
- assembler->LoadSFromOffset(dst.AsSRegister(), src_register, src_offset);
- } else {
- CHECK(dst.IsDRegister()) << dst;
- assembler->LoadDFromOffset(dst.AsDRegister(), src_register, src_offset);
- }
-}
-
-void ArmAssembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) {
- return EmitLoad(this, m_dst, SP, src.Int32Value(), size);
-}
-
-void ArmAssembler::LoadFromThread32(ManagedRegister m_dst, ThreadOffset32 src, size_t size) {
- return EmitLoad(this, m_dst, TR, src.Int32Value(), size);
-}
-
-void ArmAssembler::LoadRawPtrFromThread32(ManagedRegister m_dst, ThreadOffset32 offs) {
- ArmManagedRegister dst = m_dst.AsArm();
- CHECK(dst.IsCoreRegister()) << dst;
- LoadFromOffset(kLoadWord, dst.AsCoreRegister(), TR, offs.Int32Value());
-}
-
-void ArmAssembler::CopyRawPtrFromThread32(FrameOffset fr_offs,
- ThreadOffset32 thr_offs,
- ManagedRegister mscratch) {
- ArmManagedRegister scratch = mscratch.AsArm();
- CHECK(scratch.IsCoreRegister()) << scratch;
- LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
- TR, thr_offs.Int32Value());
- StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
- SP, fr_offs.Int32Value());
-}
-
-void ArmAssembler::CopyRawPtrToThread32(ThreadOffset32 thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) {
- ArmManagedRegister scratch = mscratch.AsArm();
- CHECK(scratch.IsCoreRegister()) << scratch;
- LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
- SP, fr_offs.Int32Value());
- StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
- TR, thr_offs.Int32Value());
-}
-
-void ArmAssembler::StoreStackOffsetToThread32(ThreadOffset32 thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) {
- ArmManagedRegister scratch = mscratch.AsArm();
- CHECK(scratch.IsCoreRegister()) << scratch;
- AddConstant(scratch.AsCoreRegister(), SP, fr_offs.Int32Value(), AL);
- StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
- TR, thr_offs.Int32Value());
-}
-
-void ArmAssembler::StoreStackPointerToThread32(ThreadOffset32 thr_offs) {
- StoreToOffset(kStoreWord, SP, TR, thr_offs.Int32Value());
-}
-
-void ArmAssembler::SignExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
- UNIMPLEMENTED(FATAL) << "no sign extension necessary for arm";
-}
-
-void ArmAssembler::ZeroExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
- UNIMPLEMENTED(FATAL) << "no zero extension necessary for arm";
-}
-
-void ArmAssembler::Move(ManagedRegister m_dst, ManagedRegister m_src, size_t /*size*/) {
- ArmManagedRegister dst = m_dst.AsArm();
- ArmManagedRegister src = m_src.AsArm();
- if (!dst.Equals(src)) {
- if (dst.IsCoreRegister()) {
- CHECK(src.IsCoreRegister()) << src;
- mov(dst.AsCoreRegister(), ShifterOperand(src.AsCoreRegister()));
- } else if (dst.IsDRegister()) {
- CHECK(src.IsDRegister()) << src;
- vmovd(dst.AsDRegister(), src.AsDRegister());
- } else if (dst.IsSRegister()) {
- CHECK(src.IsSRegister()) << src;
- vmovs(dst.AsSRegister(), src.AsSRegister());
- } else {
- CHECK(dst.IsRegisterPair()) << dst;
- CHECK(src.IsRegisterPair()) << src;
- // Ensure that the first move doesn't clobber the input of the second.
- if (src.AsRegisterPairHigh() != dst.AsRegisterPairLow()) {
- mov(dst.AsRegisterPairLow(), ShifterOperand(src.AsRegisterPairLow()));
- mov(dst.AsRegisterPairHigh(), ShifterOperand(src.AsRegisterPairHigh()));
- } else {
- mov(dst.AsRegisterPairHigh(), ShifterOperand(src.AsRegisterPairHigh()));
- mov(dst.AsRegisterPairLow(), ShifterOperand(src.AsRegisterPairLow()));
- }
- }
- }
-}
-
-void ArmAssembler::Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) {
- ArmManagedRegister scratch = mscratch.AsArm();
- CHECK(scratch.IsCoreRegister()) << scratch;
- CHECK(size == 4 || size == 8) << size;
- if (size == 4) {
- LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
- StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
- } else if (size == 8) {
- LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
- StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
- LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value() + 4);
- StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4);
- }
-}
-
-void ArmAssembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
- ManagedRegister mscratch, size_t size) {
- Register scratch = mscratch.AsArm().AsCoreRegister();
- CHECK_EQ(size, 4u);
- LoadFromOffset(kLoadWord, scratch, src_base.AsArm().AsCoreRegister(), src_offset.Int32Value());
- StoreToOffset(kStoreWord, scratch, SP, dest.Int32Value());
-}
-
-void ArmAssembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
- ManagedRegister mscratch, size_t size) {
- Register scratch = mscratch.AsArm().AsCoreRegister();
- CHECK_EQ(size, 4u);
- LoadFromOffset(kLoadWord, scratch, SP, src.Int32Value());
- StoreToOffset(kStoreWord, scratch, dest_base.AsArm().AsCoreRegister(), dest_offset.Int32Value());
-}
-
-void ArmAssembler::Copy(FrameOffset /*dst*/, FrameOffset /*src_base*/, Offset /*src_offset*/,
- ManagedRegister /*mscratch*/, size_t /*size*/) {
- UNIMPLEMENTED(FATAL);
-}
-
-void ArmAssembler::Copy(ManagedRegister dest, Offset dest_offset,
- ManagedRegister src, Offset src_offset,
- ManagedRegister mscratch, size_t size) {
- CHECK_EQ(size, 4u);
- Register scratch = mscratch.AsArm().AsCoreRegister();
- LoadFromOffset(kLoadWord, scratch, src.AsArm().AsCoreRegister(), src_offset.Int32Value());
- StoreToOffset(kStoreWord, scratch, dest.AsArm().AsCoreRegister(), dest_offset.Int32Value());
-}
-
-void ArmAssembler::Copy(FrameOffset /*dst*/, Offset /*dest_offset*/, FrameOffset /*src*/, Offset /*src_offset*/,
- ManagedRegister /*scratch*/, size_t /*size*/) {
- UNIMPLEMENTED(FATAL);
-}
-
-void ArmAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
- FrameOffset handle_scope_offset,
- ManagedRegister min_reg, bool null_allowed) {
- ArmManagedRegister out_reg = mout_reg.AsArm();
- ArmManagedRegister in_reg = min_reg.AsArm();
- CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg;
- CHECK(out_reg.IsCoreRegister()) << out_reg;
- if (null_allowed) {
- // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
- // the address in the handle scope holding the reference.
- // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
- if (in_reg.IsNoRegister()) {
- LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
- SP, handle_scope_offset.Int32Value());
- in_reg = out_reg;
- }
- cmp(in_reg.AsCoreRegister(), ShifterOperand(0));
- if (!out_reg.Equals(in_reg)) {
- it(EQ, kItElse);
- LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
- } else {
- it(NE);
- }
- AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), NE);
- } else {
- AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), AL);
- }
-}
-
-void ArmAssembler::CreateHandleScopeEntry(FrameOffset out_off,
- FrameOffset handle_scope_offset,
- ManagedRegister mscratch,
- bool null_allowed) {
- ArmManagedRegister scratch = mscratch.AsArm();
- CHECK(scratch.IsCoreRegister()) << scratch;
- if (null_allowed) {
- LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP,
- handle_scope_offset.Int32Value());
- // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
- // the address in the handle scope holding the reference.
- // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
- cmp(scratch.AsCoreRegister(), ShifterOperand(0));
- it(NE);
- AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), NE);
- } else {
- AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), AL);
- }
- StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, out_off.Int32Value());
-}
-
-void ArmAssembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
- ManagedRegister min_reg) {
- ArmManagedRegister out_reg = mout_reg.AsArm();
- ArmManagedRegister in_reg = min_reg.AsArm();
- CHECK(out_reg.IsCoreRegister()) << out_reg;
- CHECK(in_reg.IsCoreRegister()) << in_reg;
- Label null_arg;
- if (!out_reg.Equals(in_reg)) {
- LoadImmediate(out_reg.AsCoreRegister(), 0, EQ); // TODO: why EQ?
- }
- cmp(in_reg.AsCoreRegister(), ShifterOperand(0));
- it(NE);
- LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
- in_reg.AsCoreRegister(), 0, NE);
-}
-
-void ArmAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
- // TODO: not validating references.
-}
-
-void ArmAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
- // TODO: not validating references.
-}
-
-void ArmAssembler::Call(ManagedRegister mbase, Offset offset,
- ManagedRegister mscratch) {
- ArmManagedRegister base = mbase.AsArm();
- ArmManagedRegister scratch = mscratch.AsArm();
- CHECK(base.IsCoreRegister()) << base;
- CHECK(scratch.IsCoreRegister()) << scratch;
- LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
- base.AsCoreRegister(), offset.Int32Value());
- blx(scratch.AsCoreRegister());
- // TODO: place reference map on call.
-}
-
-void ArmAssembler::Call(FrameOffset base, Offset offset,
- ManagedRegister mscratch) {
- ArmManagedRegister scratch = mscratch.AsArm();
- CHECK(scratch.IsCoreRegister()) << scratch;
- // Call *(*(SP + base) + offset)
- LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
- SP, base.Int32Value());
- LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
- scratch.AsCoreRegister(), offset.Int32Value());
- blx(scratch.AsCoreRegister());
- // TODO: place reference map on call
-}
-
-void ArmAssembler::CallFromThread32(ThreadOffset32 offset ATTRIBUTE_UNUSED,
- ManagedRegister scratch ATTRIBUTE_UNUSED) {
- UNIMPLEMENTED(FATAL);
-}
-
-void ArmAssembler::GetCurrentThread(ManagedRegister tr) {
- mov(tr.AsArm().AsCoreRegister(), ShifterOperand(TR));
-}
-
-void ArmAssembler::GetCurrentThread(FrameOffset offset,
- ManagedRegister /*scratch*/) {
- StoreToOffset(kStoreWord, TR, SP, offset.Int32Value(), AL);
-}
-
-void ArmAssembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) {
- ArmManagedRegister scratch = mscratch.AsArm();
- ArmExceptionSlowPath* slow = new (GetArena()) ArmExceptionSlowPath(scratch, stack_adjust);
- buffer_.EnqueueSlowPath(slow);
- LoadFromOffset(kLoadWord,
- scratch.AsCoreRegister(),
- TR,
- Thread::ExceptionOffset<kArmPointerSize>().Int32Value());
- cmp(scratch.AsCoreRegister(), ShifterOperand(0));
- b(slow->Entry(), NE);
-}
-
-void ArmExceptionSlowPath::Emit(Assembler* sasm) {
- ArmAssembler* sp_asm = down_cast<ArmAssembler*>(sasm);
-#define __ sp_asm->
- __ Bind(&entry_);
- if (stack_adjust_ != 0) { // Fix up the frame.
- __ DecreaseFrameSize(stack_adjust_);
- }
- // Pass exception object as argument.
- // Don't care about preserving R0 as this call won't return.
- __ mov(R0, ShifterOperand(scratch_.AsCoreRegister()));
- // Set up call to Thread::Current()->pDeliverException.
- __ LoadFromOffset(kLoadWord,
- R12,
- TR,
- QUICK_ENTRYPOINT_OFFSET(kArmPointerSize, pDeliverException).Int32Value());
- __ blx(R12);
-#undef __
-}
-
-
static int LeadingZeros(uint32_t val) {
uint32_t alt;
int32_t n;
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index 2b7414d..ff0bbaf 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -23,12 +23,14 @@
#include "base/arena_allocator.h"
#include "base/arena_containers.h"
#include "base/bit_utils.h"
+#include "base/enums.h"
#include "base/logging.h"
#include "base/stl_util.h"
#include "base/value_object.h"
#include "constants_arm.h"
#include "utils/arm/managed_register_arm.h"
#include "utils/assembler.h"
+#include "utils/jni_macro_assembler.h"
#include "offsets.h"
namespace art {
@@ -880,122 +882,6 @@
virtual void CompareAndBranchIfZero(Register r, Label* label) = 0;
virtual void CompareAndBranchIfNonZero(Register r, Label* label) = 0;
- //
- // Overridden common assembler high-level functionality
- //
-
- // Emit code that will create an activation on the stack
- void BuildFrame(size_t frame_size,
- ManagedRegister method_reg,
- ArrayRef<const ManagedRegister> callee_save_regs,
- const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
-
- // Emit code that will remove an activation from the stack
- void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs)
- OVERRIDE;
-
- void IncreaseFrameSize(size_t adjust) OVERRIDE;
- void DecreaseFrameSize(size_t adjust) OVERRIDE;
-
- // Store routines
- void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
- void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
- void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
-
- void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
-
- void StoreImmediateToThread32(ThreadOffset32 dest, uint32_t imm, ManagedRegister scratch)
- OVERRIDE;
-
- void StoreStackOffsetToThread32(ThreadOffset32 thr_offs, FrameOffset fr_offs,
- ManagedRegister scratch) OVERRIDE;
-
- void StoreStackPointerToThread32(ThreadOffset32 thr_offs) OVERRIDE;
-
- void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
- ManagedRegister scratch) OVERRIDE;
-
- // Load routines
- void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
-
- void LoadFromThread32(ManagedRegister dest, ThreadOffset32 src, size_t size) OVERRIDE;
-
- void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
-
- void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
- bool unpoison_reference) OVERRIDE;
-
- void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
-
- void LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset32 offs) OVERRIDE;
-
- // Copying routines
- void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
-
- void CopyRawPtrFromThread32(FrameOffset fr_offs, ThreadOffset32 thr_offs,
- ManagedRegister scratch) OVERRIDE;
-
- void CopyRawPtrToThread32(ThreadOffset32 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
- OVERRIDE;
-
- void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
-
- void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
-
- void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, ManagedRegister scratch,
- size_t size) OVERRIDE;
-
- void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, ManagedRegister scratch,
- size_t size) OVERRIDE;
-
- void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, ManagedRegister scratch,
- size_t size) OVERRIDE;
-
- void Copy(ManagedRegister dest, Offset dest_offset, ManagedRegister src, Offset src_offset,
- ManagedRegister scratch, size_t size) OVERRIDE;
-
- void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
- ManagedRegister scratch, size_t size) OVERRIDE;
-
- // Sign extension
- void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
-
- // Zero extension
- void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
-
- // Exploit fast access in managed code to Thread::Current()
- void GetCurrentThread(ManagedRegister tr) OVERRIDE;
- void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
-
- // Set up out_reg to hold a Object** into the handle scope, or to be null if the
- // value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the handle scope entry to see if the value is
- // null.
- void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
- ManagedRegister in_reg, bool null_allowed) OVERRIDE;
-
- // Set up out_off to hold a Object** into the handle scope, or to be null if the
- // value is null and null_allowed.
- void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
- ManagedRegister scratch, bool null_allowed) OVERRIDE;
-
- // src holds a handle scope entry (Object**) load this into dst
- void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
-
- // Heap::VerifyObject on src. In some cases (such as a reference to this) we
- // know that src may not be null.
- void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
- void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
-
- // Call to address held at [base+offset]
- void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void CallFromThread32(ThreadOffset32 offset, ManagedRegister scratch) OVERRIDE;
-
- // Generate code to check if Thread::Current()->exception_ is non-null
- // and branch to a ExceptionSlowPath if it is.
- void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
-
static uint32_t ModifiedImmediate(uint32_t value);
static bool IsLowRegister(Register r) {
@@ -1073,18 +959,6 @@
ArenaVector<Label*> tracked_labels_;
};
-// Slowpath entered when Thread::Current()->_exception is non-null
-class ArmExceptionSlowPath FINAL : public SlowPath {
- public:
- ArmExceptionSlowPath(ArmManagedRegister scratch, size_t stack_adjust)
- : scratch_(scratch), stack_adjust_(stack_adjust) {
- }
- void Emit(Assembler *sp_asm) OVERRIDE;
- private:
- const ArmManagedRegister scratch_;
- const size_t stack_adjust_;
-};
-
} // namespace arm
} // namespace art
diff --git a/compiler/utils/arm/assembler_arm32.cc b/compiler/utils/arm/assembler_arm32.cc
index c95dfa8..6f9d5f3 100644
--- a/compiler/utils/arm/assembler_arm32.cc
+++ b/compiler/utils/arm/assembler_arm32.cc
@@ -1664,12 +1664,6 @@
}
-void Arm32Assembler::MemoryBarrier(ManagedRegister mscratch) {
- CHECK_EQ(mscratch.AsArm().AsCoreRegister(), R12);
- dmb(SY);
-}
-
-
void Arm32Assembler::dmb(DmbOptions flavor) {
int32_t encoding = 0xf57ff05f; // dmb
Emit(encoding | flavor);
diff --git a/compiler/utils/arm/assembler_arm32.h b/compiler/utils/arm/assembler_arm32.h
index 554dd23..044eaa1 100644
--- a/compiler/utils/arm/assembler_arm32.h
+++ b/compiler/utils/arm/assembler_arm32.h
@@ -316,8 +316,6 @@
void Emit(int32_t value);
void Bind(Label* label) OVERRIDE;
- void MemoryBarrier(ManagedRegister scratch) OVERRIDE;
-
JumpTable* CreateJumpTable(std::vector<Label*>&& labels, Register base_reg) OVERRIDE;
void EmitJumpTableDispatch(JumpTable* jump_table, Register displacement_reg) OVERRIDE;
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index 353c729..ee69698 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -2325,7 +2325,7 @@
}
Register rn = ad.GetRegister();
- if (IsHighRegister(rn) && rn != SP && rn != PC) {
+ if (IsHighRegister(rn) && (byte || half || (rn != SP && rn != PC))) {
must_be_32bit = true;
}
@@ -2337,24 +2337,24 @@
// Immediate offset
int32_t offset = ad.GetOffset();
- // The 16 bit SP relative instruction can only have a 10 bit offset.
- if (rn == SP && offset >= (1 << 10)) {
- must_be_32bit = true;
- }
-
if (byte) {
// 5 bit offset, no shift.
- if (offset >= (1 << 5)) {
+ if ((offset & ~0x1f) != 0) {
must_be_32bit = true;
}
} else if (half) {
- // 6 bit offset, shifted by 1.
- if (offset >= (1 << 6)) {
+ // 5 bit offset, shifted by 1.
+ if ((offset & ~(0x1f << 1)) != 0) {
+ must_be_32bit = true;
+ }
+ } else if (rn == SP || rn == PC) {
+ // The 16 bit SP/PC relative instruction can only have an (imm8 << 2) offset.
+ if ((offset & ~(0xff << 2)) != 0) {
must_be_32bit = true;
}
} else {
- // 7 bit offset, shifted by 2.
- if (offset >= (1 << 7)) {
+ // 5 bit offset, shifted by 2.
+ if ((offset & ~(0x1f << 2)) != 0) {
must_be_32bit = true;
}
}
@@ -2370,7 +2370,7 @@
} else {
// 16 bit thumb1.
uint8_t opA = 0;
- bool sp_relative = false;
+ bool sp_or_pc_relative = false;
if (byte) {
opA = 7U /* 0b0111 */;
@@ -2379,7 +2379,10 @@
} else {
if (rn == SP) {
opA = 9U /* 0b1001 */;
- sp_relative = true;
+ sp_or_pc_relative = true;
+ } else if (rn == PC) {
+ opA = 4U /* 0b0100 */;
+ sp_or_pc_relative = true;
} else {
opA = 6U /* 0b0110 */;
}
@@ -2388,7 +2391,7 @@
(load ? B11 : 0);
CHECK_GE(offset, 0);
- if (sp_relative) {
+ if (sp_or_pc_relative) {
- // SP relative, 10 bit offset.
+ // SP/PC relative, 10 bit offset.
CHECK_LT(offset, (1 << 10));
CHECK_ALIGNED(offset, 4);
@@ -3860,12 +3863,6 @@
}
-void Thumb2Assembler::MemoryBarrier(ManagedRegister mscratch) {
- CHECK_EQ(mscratch.AsArm().AsCoreRegister(), R12);
- dmb(SY);
-}
-
-
void Thumb2Assembler::dmb(DmbOptions flavor) {
int32_t encoding = 0xf3bf8f50; // dmb in T1 encoding.
Emit32(encoding | flavor);
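The mask form above is stricter than the old '>=' comparisons: it also forces the 32-bit encoding for negative and misaligned offsets, not just large ones. A worked check for the halfword case (imm5 shifted by 1), restating the predicate from the diff rather than introducing a new API:

    // True means the 16-bit encoding cannot represent the offset.
    bool NeedsWideHalfwordOffset(int32_t offset) {
      return (offset & ~(0x1f << 1)) != 0;  // 62 -> false; 64, 1 (misaligned),
    }                                       // and -2 (negative) -> true.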
diff --git a/compiler/utils/arm/assembler_thumb2.h b/compiler/utils/arm/assembler_thumb2.h
index 4ee23c0..1c1c98b 100644
--- a/compiler/utils/arm/assembler_thumb2.h
+++ b/compiler/utils/arm/assembler_thumb2.h
@@ -368,8 +368,6 @@
void Emit16(int16_t value); // Emit a 16 bit instruction in little endian format.
void Bind(Label* label) OVERRIDE;
- void MemoryBarrier(ManagedRegister scratch) OVERRIDE;
-
// Force the assembler to generate 32 bit instructions.
void Force32Bit() {
force_32bit_ = true;
diff --git a/compiler/utils/arm/assembler_thumb2_test.cc b/compiler/utils/arm/assembler_thumb2_test.cc
index abb09f7..3ca3714 100644
--- a/compiler/utils/arm/assembler_thumb2_test.cc
+++ b/compiler/utils/arm/assembler_thumb2_test.cc
@@ -279,6 +279,148 @@
DriverStr(expected, "smull");
}
+TEST_F(AssemblerThumb2Test, LoadByteFromThumbOffset) {
+ arm::LoadOperandType type = arm::kLoadUnsignedByte;
+
+ __ LoadFromOffset(type, arm::R0, arm::R7, 0);
+ __ LoadFromOffset(type, arm::R1, arm::R7, 31);
+ __ LoadFromOffset(type, arm::R2, arm::R7, 32);
+ __ LoadFromOffset(type, arm::R3, arm::R7, 4095);
+ __ LoadFromOffset(type, arm::R4, arm::SP, 0);
+
+ const char* expected =
+ "ldrb r0, [r7, #0]\n"
+ "ldrb r1, [r7, #31]\n"
+ "ldrb.w r2, [r7, #32]\n"
+ "ldrb.w r3, [r7, #4095]\n"
+ "ldrb.w r4, [sp, #0]\n";
+ DriverStr(expected, "LoadByteFromThumbOffset");
+}
+
+TEST_F(AssemblerThumb2Test, StoreByteToThumbOffset) {
+ arm::StoreOperandType type = arm::kStoreByte;
+
+ __ StoreToOffset(type, arm::R0, arm::R7, 0);
+ __ StoreToOffset(type, arm::R1, arm::R7, 31);
+ __ StoreToOffset(type, arm::R2, arm::R7, 32);
+ __ StoreToOffset(type, arm::R3, arm::R7, 4095);
+ __ StoreToOffset(type, arm::R4, arm::SP, 0);
+
+ const char* expected =
+ "strb r0, [r7, #0]\n"
+ "strb r1, [r7, #31]\n"
+ "strb.w r2, [r7, #32]\n"
+ "strb.w r3, [r7, #4095]\n"
+ "strb.w r4, [sp, #0]\n";
+ DriverStr(expected, "StoreByteToThumbOffset");
+}
+
+TEST_F(AssemblerThumb2Test, LoadHalfFromThumbOffset) {
+ arm::LoadOperandType type = arm::kLoadUnsignedHalfword;
+
+ __ LoadFromOffset(type, arm::R0, arm::R7, 0);
+ __ LoadFromOffset(type, arm::R1, arm::R7, 62);
+ __ LoadFromOffset(type, arm::R2, arm::R7, 64);
+ __ LoadFromOffset(type, arm::R3, arm::R7, 4094);
+ __ LoadFromOffset(type, arm::R4, arm::SP, 0);
+ __ LoadFromOffset(type, arm::R5, arm::R7, 1); // Unaligned
+
+ const char* expected =
+ "ldrh r0, [r7, #0]\n"
+ "ldrh r1, [r7, #62]\n"
+ "ldrh.w r2, [r7, #64]\n"
+ "ldrh.w r3, [r7, #4094]\n"
+ "ldrh.w r4, [sp, #0]\n"
+ "ldrh.w r5, [r7, #1]\n";
+ DriverStr(expected, "LoadHalfFromThumbOffset");
+}
+
+TEST_F(AssemblerThumb2Test, StoreHalfToThumbOffset) {
+ arm::StoreOperandType type = arm::kStoreHalfword;
+
+ __ StoreToOffset(type, arm::R0, arm::R7, 0);
+ __ StoreToOffset(type, arm::R1, arm::R7, 62);
+ __ StoreToOffset(type, arm::R2, arm::R7, 64);
+ __ StoreToOffset(type, arm::R3, arm::R7, 4094);
+ __ StoreToOffset(type, arm::R4, arm::SP, 0);
+ __ StoreToOffset(type, arm::R5, arm::R7, 1); // Unaligned
+
+ const char* expected =
+ "strh r0, [r7, #0]\n"
+ "strh r1, [r7, #62]\n"
+ "strh.w r2, [r7, #64]\n"
+ "strh.w r3, [r7, #4094]\n"
+ "strh.w r4, [sp, #0]\n"
+ "strh.w r5, [r7, #1]\n";
+ DriverStr(expected, "StoreHalfToThumbOffset");
+}
+
+TEST_F(AssemblerThumb2Test, LoadWordFromSpPlusOffset) {
+ arm::LoadOperandType type = arm::kLoadWord;
+
+ __ LoadFromOffset(type, arm::R0, arm::SP, 0);
+ __ LoadFromOffset(type, arm::R1, arm::SP, 124);
+ __ LoadFromOffset(type, arm::R2, arm::SP, 128);
+ __ LoadFromOffset(type, arm::R3, arm::SP, 1020);
+ __ LoadFromOffset(type, arm::R4, arm::SP, 1024);
+ __ LoadFromOffset(type, arm::R5, arm::SP, 4092);
+ __ LoadFromOffset(type, arm::R6, arm::SP, 1); // Unaligned
+
+ const char* expected =
+ "ldr r0, [sp, #0]\n"
+ "ldr r1, [sp, #124]\n"
+ "ldr r2, [sp, #128]\n"
+ "ldr r3, [sp, #1020]\n"
+ "ldr.w r4, [sp, #1024]\n"
+ "ldr.w r5, [sp, #4092]\n"
+ "ldr.w r6, [sp, #1]\n";
+ DriverStr(expected, "LoadWordFromSpPlusOffset");
+}
+
+TEST_F(AssemblerThumb2Test, StoreWordToSpPlusOffset) {
+ arm::StoreOperandType type = arm::kStoreWord;
+
+ __ StoreToOffset(type, arm::R0, arm::SP, 0);
+ __ StoreToOffset(type, arm::R1, arm::SP, 124);
+ __ StoreToOffset(type, arm::R2, arm::SP, 128);
+ __ StoreToOffset(type, arm::R3, arm::SP, 1020);
+ __ StoreToOffset(type, arm::R4, arm::SP, 1024);
+ __ StoreToOffset(type, arm::R5, arm::SP, 4092);
+ __ StoreToOffset(type, arm::R6, arm::SP, 1); // Unaligned
+
+ const char* expected =
+ "str r0, [sp, #0]\n"
+ "str r1, [sp, #124]\n"
+ "str r2, [sp, #128]\n"
+ "str r3, [sp, #1020]\n"
+ "str.w r4, [sp, #1024]\n"
+ "str.w r5, [sp, #4092]\n"
+ "str.w r6, [sp, #1]\n";
+ DriverStr(expected, "StoreWordToSpPlusOffset");
+}
+
+TEST_F(AssemblerThumb2Test, LoadWordFromPcPlusOffset) {
+ arm::LoadOperandType type = arm::kLoadWord;
+
+ __ LoadFromOffset(type, arm::R0, arm::PC, 0);
+ __ LoadFromOffset(type, arm::R1, arm::PC, 124);
+ __ LoadFromOffset(type, arm::R2, arm::PC, 128);
+ __ LoadFromOffset(type, arm::R3, arm::PC, 1020);
+ __ LoadFromOffset(type, arm::R4, arm::PC, 1024);
+ __ LoadFromOffset(type, arm::R5, arm::PC, 4092);
+ __ LoadFromOffset(type, arm::R6, arm::PC, 1); // Unaligned
+
+ const char* expected =
+ "ldr r0, [pc, #0]\n"
+ "ldr r1, [pc, #124]\n"
+ "ldr r2, [pc, #128]\n"
+ "ldr r3, [pc, #1020]\n"
+ "ldr.w r4, [pc, #1024]\n"
+ "ldr.w r5, [pc, #4092]\n"
+ "ldr.w r6, [pc, #1]\n";
+ DriverStr(expected, "LoadWordFromPcPlusOffset");
+}
+
TEST_F(AssemblerThumb2Test, StoreWordToThumbOffset) {
arm::StoreOperandType type = arm::kStoreWord;
int32_t offset = 4092;
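The new PC-relative path reuses the 16-bit LDR (literal) form: opA=0b0100 with the load bit set and an 8-bit word offset, which is why offsets up to 1020 stay narrow in LoadWordFromPcPlusOffset. A hypothetical standalone encoder for just that form (ART actually emits this through the opA/B11 path shown in the assembler diff):

    // Hypothetical helper for illustration only.
    uint16_t EncodeLdrLiteralT1(uint32_t rt, uint32_t offset) {
      CHECK_ALIGNED(offset, 4);    // imm8 is a word offset.
      CHECK_LT(offset, 1u << 10);  // 8 bits << 2 => 0..1020.
      return static_cast<uint16_t>((4u << 12) | (1u << 11) |  // opA=0b0100, load bit.
                                   (rt << 8) | (offset >> 2));
    }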
diff --git a/compiler/utils/arm/jni_macro_assembler_arm.cc b/compiler/utils/arm/jni_macro_assembler_arm.cc
new file mode 100644
index 0000000..c039816
--- /dev/null
+++ b/compiler/utils/arm/jni_macro_assembler_arm.cc
@@ -0,0 +1,612 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni_macro_assembler_arm.h"
+
+#include <algorithm>
+
+#include "assembler_arm32.h"
+#include "assembler_thumb2.h"
+#include "base/arena_allocator.h"
+#include "base/bit_utils.h"
+#include "base/logging.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "offsets.h"
+#include "thread.h"
+
+namespace art {
+namespace arm {
+
+constexpr size_t kFramePointerSize = static_cast<size_t>(kArmPointerSize);
+
+// Slowpath entered when Thread::Current()->_exception is non-null
+class ArmExceptionSlowPath FINAL : public SlowPath {
+ public:
+ ArmExceptionSlowPath(ArmManagedRegister scratch, size_t stack_adjust)
+ : scratch_(scratch), stack_adjust_(stack_adjust) {
+ }
+ void Emit(Assembler *sp_asm) OVERRIDE;
+ private:
+ const ArmManagedRegister scratch_;
+ const size_t stack_adjust_;
+};
+
+ArmJNIMacroAssembler::ArmJNIMacroAssembler(ArenaAllocator* arena, InstructionSet isa) {
+ switch (isa) {
+ case kArm:
+ asm_.reset(new (arena) Arm32Assembler(arena));
+ break;
+
+ case kThumb2:
+ asm_.reset(new (arena) Thumb2Assembler(arena));
+ break;
+
+ default:
+ LOG(FATAL) << isa;
+ UNREACHABLE();
+ }
+}
+
+ArmJNIMacroAssembler::~ArmJNIMacroAssembler() {
+}
+
+size_t ArmJNIMacroAssembler::CodeSize() const {
+ return asm_->CodeSize();
+}
+
+DebugFrameOpCodeWriterForAssembler& ArmJNIMacroAssembler::cfi() {
+ return asm_->cfi();
+}
+
+void ArmJNIMacroAssembler::FinalizeCode() {
+ asm_->FinalizeCode();
+}
+
+void ArmJNIMacroAssembler::FinalizeInstructions(const MemoryRegion& region) {
+ asm_->FinalizeInstructions(region);
+}
+
+static dwarf::Reg DWARFReg(Register reg) {
+ return dwarf::Reg::ArmCore(static_cast<int>(reg));
+}
+
+static dwarf::Reg DWARFReg(SRegister reg) {
+ return dwarf::Reg::ArmFp(static_cast<int>(reg));
+}
+
+#define __ asm_->
+
+void ArmJNIMacroAssembler::BuildFrame(size_t frame_size,
+ ManagedRegister method_reg,
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ const ManagedRegisterEntrySpills& entry_spills) {
+ CHECK_EQ(CodeSize(), 0U); // Nothing emitted yet
+ CHECK_ALIGNED(frame_size, kStackAlignment);
+ CHECK_EQ(R0, method_reg.AsArm().AsCoreRegister());
+
+ // Push callee saves and link register.
+ RegList core_spill_mask = 1 << LR;
+ uint32_t fp_spill_mask = 0;
+ for (const ManagedRegister& reg : callee_save_regs) {
+ if (reg.AsArm().IsCoreRegister()) {
+ core_spill_mask |= 1 << reg.AsArm().AsCoreRegister();
+ } else {
+ fp_spill_mask |= 1 << reg.AsArm().AsSRegister();
+ }
+ }
+ __ PushList(core_spill_mask);
+ cfi().AdjustCFAOffset(POPCOUNT(core_spill_mask) * kFramePointerSize);
+ cfi().RelOffsetForMany(DWARFReg(Register(0)), 0, core_spill_mask, kFramePointerSize);
+ if (fp_spill_mask != 0) {
+ __ vpushs(SRegister(CTZ(fp_spill_mask)), POPCOUNT(fp_spill_mask));
+ cfi().AdjustCFAOffset(POPCOUNT(fp_spill_mask) * kFramePointerSize);
+ cfi().RelOffsetForMany(DWARFReg(SRegister(0)), 0, fp_spill_mask, kFramePointerSize);
+ }
+
+ // Increase frame to required size.
+ int pushed_values = POPCOUNT(core_spill_mask) + POPCOUNT(fp_spill_mask);
+ CHECK_GT(frame_size, pushed_values * kFramePointerSize); // Must at least have space for Method*.
+ IncreaseFrameSize(frame_size - pushed_values * kFramePointerSize); // handles CFI as well.
+
+ // Write out Method*.
+ __ StoreToOffset(kStoreWord, R0, SP, 0);
+
+ // Write out entry spills.
+ int32_t offset = frame_size + kFramePointerSize;
+ for (size_t i = 0; i < entry_spills.size(); ++i) {
+ ArmManagedRegister reg = entry_spills.at(i).AsArm();
+ if (reg.IsNoRegister()) {
+ // only increment stack offset.
+ ManagedRegisterSpill spill = entry_spills.at(i);
+ offset += spill.getSize();
+ } else if (reg.IsCoreRegister()) {
+ __ StoreToOffset(kStoreWord, reg.AsCoreRegister(), SP, offset);
+ offset += 4;
+ } else if (reg.IsSRegister()) {
+ __ StoreSToOffset(reg.AsSRegister(), SP, offset);
+ offset += 4;
+ } else if (reg.IsDRegister()) {
+ __ StoreDToOffset(reg.AsDRegister(), SP, offset);
+ offset += 8;
+ }
+ }
+}
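For orientation, the frame BuildFrame lays out, sketched from the stores above (exact contents depend on the spill masks and entry spills):

    //  higher addresses
    //  [caller frame: entry spills written at SP + frame_size + 4 and up]
    //  [core callee-saves + LR]   pushed first
    //  [FP callee-saves]          pushed second, if any
    //  [frame extension]          IncreaseFrameSize pads out to frame_size
    //  [Method* (R0)]             stored at SP + 0
    //  lower addresses  <- SP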
+
+void ArmJNIMacroAssembler::RemoveFrame(size_t frame_size,
+ ArrayRef<const ManagedRegister> callee_save_regs) {
+ CHECK_ALIGNED(frame_size, kStackAlignment);
+ cfi().RememberState();
+
+ // Compute callee saves to pop and PC.
+ RegList core_spill_mask = 1 << PC;
+ uint32_t fp_spill_mask = 0;
+ for (const ManagedRegister& reg : callee_save_regs) {
+ if (reg.AsArm().IsCoreRegister()) {
+ core_spill_mask |= 1 << reg.AsArm().AsCoreRegister();
+ } else {
+ fp_spill_mask |= 1 << reg.AsArm().AsSRegister();
+ }
+ }
+
+ // Decrease frame to start of callee saves.
+ int pop_values = POPCOUNT(core_spill_mask) + POPCOUNT(fp_spill_mask);
+ CHECK_GT(frame_size, pop_values * kFramePointerSize);
+ DecreaseFrameSize(frame_size - (pop_values * kFramePointerSize)); // handles CFI as well.
+
+ if (fp_spill_mask != 0) {
+ __ vpops(SRegister(CTZ(fp_spill_mask)), POPCOUNT(fp_spill_mask));
+ cfi().AdjustCFAOffset(-kFramePointerSize * POPCOUNT(fp_spill_mask));
+ cfi().RestoreMany(DWARFReg(SRegister(0)), fp_spill_mask);
+ }
+
+ // Pop callee saves and PC.
+ __ PopList(core_spill_mask);
+
+ // The CFI should be restored for any code that follows the exit block.
+ cfi().RestoreState();
+ cfi().DefCFAOffset(frame_size);
+}
+
+void ArmJNIMacroAssembler::IncreaseFrameSize(size_t adjust) {
+ __ AddConstant(SP, -adjust);
+ cfi().AdjustCFAOffset(adjust);
+}
+
+static void DecreaseFrameSizeImpl(ArmAssembler* assembler, size_t adjust) {
+ assembler->AddConstant(SP, adjust);
+ assembler->cfi().AdjustCFAOffset(-adjust);
+}
+
+void ArmJNIMacroAssembler::DecreaseFrameSize(size_t adjust) {
+ DecreaseFrameSizeImpl(asm_.get(), adjust);
+}
+
+void ArmJNIMacroAssembler::Store(FrameOffset dest, ManagedRegister msrc, size_t size) {
+ ArmManagedRegister src = msrc.AsArm();
+ if (src.IsNoRegister()) {
+ CHECK_EQ(0u, size);
+ } else if (src.IsCoreRegister()) {
+ CHECK_EQ(4u, size);
+ __ StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
+ } else if (src.IsRegisterPair()) {
+ CHECK_EQ(8u, size);
+ __ StoreToOffset(kStoreWord, src.AsRegisterPairLow(), SP, dest.Int32Value());
+ __ StoreToOffset(kStoreWord, src.AsRegisterPairHigh(), SP, dest.Int32Value() + 4);
+ } else if (src.IsSRegister()) {
+ __ StoreSToOffset(src.AsSRegister(), SP, dest.Int32Value());
+ } else {
+ CHECK(src.IsDRegister()) << src;
+ __ StoreDToOffset(src.AsDRegister(), SP, dest.Int32Value());
+ }
+}
+
+void ArmJNIMacroAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
+ ArmManagedRegister src = msrc.AsArm();
+ CHECK(src.IsCoreRegister()) << src;
+ __ StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
+}
+
+void ArmJNIMacroAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
+ ArmManagedRegister src = msrc.AsArm();
+ CHECK(src.IsCoreRegister()) << src;
+ __ StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
+}
+
+void ArmJNIMacroAssembler::StoreSpanning(FrameOffset dest,
+ ManagedRegister msrc,
+ FrameOffset in_off,
+ ManagedRegister mscratch) {
+ ArmManagedRegister src = msrc.AsArm();
+ ArmManagedRegister scratch = mscratch.AsArm();
+ __ StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
+ __ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, in_off.Int32Value());
+ __ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + sizeof(uint32_t));
+}
+
+void ArmJNIMacroAssembler::CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ __ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
+ __ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
+}
+
+void ArmJNIMacroAssembler::LoadRef(ManagedRegister mdest,
+ ManagedRegister base,
+ MemberOffset offs,
+ bool unpoison_reference) {
+ ArmManagedRegister dst = mdest.AsArm();
+ CHECK(dst.IsCoreRegister()) << dst;
+ __ LoadFromOffset(kLoadWord,
+ dst.AsCoreRegister(),
+ base.AsArm().AsCoreRegister(),
+ offs.Int32Value());
+ if (unpoison_reference) {
+ __ MaybeUnpoisonHeapReference(dst.AsCoreRegister());
+ }
+}
+
+void ArmJNIMacroAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
+ ArmManagedRegister dst = mdest.AsArm();
+ CHECK(dst.IsCoreRegister()) << dst;
+ __ LoadFromOffset(kLoadWord, dst.AsCoreRegister(), SP, src.Int32Value());
+}
+
+void ArmJNIMacroAssembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
+ Offset offs) {
+ ArmManagedRegister dst = mdest.AsArm();
+ CHECK(dst.IsCoreRegister()) << dst;
+ __ LoadFromOffset(kLoadWord,
+ dst.AsCoreRegister(),
+ base.AsArm().AsCoreRegister(),
+ offs.Int32Value());
+}
+
+void ArmJNIMacroAssembler::StoreImmediateToFrame(FrameOffset dest,
+ uint32_t imm,
+ ManagedRegister mscratch) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ __ LoadImmediate(scratch.AsCoreRegister(), imm);
+ __ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
+}
+
+static void EmitLoad(ArmAssembler* assembler,
+ ManagedRegister m_dst,
+ Register src_register,
+ int32_t src_offset,
+ size_t size) {
+ ArmManagedRegister dst = m_dst.AsArm();
+ if (dst.IsNoRegister()) {
+ CHECK_EQ(0u, size) << dst;
+ } else if (dst.IsCoreRegister()) {
+ CHECK_EQ(4u, size) << dst;
+ assembler->LoadFromOffset(kLoadWord, dst.AsCoreRegister(), src_register, src_offset);
+ } else if (dst.IsRegisterPair()) {
+ CHECK_EQ(8u, size) << dst;
+ assembler->LoadFromOffset(kLoadWord, dst.AsRegisterPairLow(), src_register, src_offset);
+ assembler->LoadFromOffset(kLoadWord, dst.AsRegisterPairHigh(), src_register, src_offset + 4);
+ } else if (dst.IsSRegister()) {
+ assembler->LoadSFromOffset(dst.AsSRegister(), src_register, src_offset);
+ } else {
+ CHECK(dst.IsDRegister()) << dst;
+ assembler->LoadDFromOffset(dst.AsDRegister(), src_register, src_offset);
+ }
+}
+
+void ArmJNIMacroAssembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) {
+ EmitLoad(asm_.get(), m_dst, SP, src.Int32Value(), size);
+}
+
+void ArmJNIMacroAssembler::LoadFromThread(ManagedRegister m_dst, ThreadOffset32 src, size_t size) {
+ EmitLoad(asm_.get(), m_dst, TR, src.Int32Value(), size);
+}
+
+void ArmJNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister m_dst, ThreadOffset32 offs) {
+ ArmManagedRegister dst = m_dst.AsArm();
+ CHECK(dst.IsCoreRegister()) << dst;
+ __ LoadFromOffset(kLoadWord, dst.AsCoreRegister(), TR, offs.Int32Value());
+}
+
+void ArmJNIMacroAssembler::CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset32 thr_offs,
+ ManagedRegister mscratch) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ __ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), TR, thr_offs.Int32Value());
+ __ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, fr_offs.Int32Value());
+}
+
+void ArmJNIMacroAssembler::CopyRawPtrToThread(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ __ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, fr_offs.Int32Value());
+ __ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), TR, thr_offs.Int32Value());
+}
+
+void ArmJNIMacroAssembler::StoreStackOffsetToThread(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ __ AddConstant(scratch.AsCoreRegister(), SP, fr_offs.Int32Value(), AL);
+ __ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), TR, thr_offs.Int32Value());
+}
+
+void ArmJNIMacroAssembler::StoreStackPointerToThread(ThreadOffset32 thr_offs) {
+ __ StoreToOffset(kStoreWord, SP, TR, thr_offs.Int32Value());
+}
+
+void ArmJNIMacroAssembler::SignExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
+ UNIMPLEMENTED(FATAL) << "no sign extension necessary for arm";
+}
+
+void ArmJNIMacroAssembler::ZeroExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
+ UNIMPLEMENTED(FATAL) << "no zero extension necessary for arm";
+}
+
+void ArmJNIMacroAssembler::Move(ManagedRegister m_dst, ManagedRegister m_src, size_t /*size*/) {
+ ArmManagedRegister dst = m_dst.AsArm();
+ ArmManagedRegister src = m_src.AsArm();
+ if (!dst.Equals(src)) {
+ if (dst.IsCoreRegister()) {
+ CHECK(src.IsCoreRegister()) << src;
+ __ mov(dst.AsCoreRegister(), ShifterOperand(src.AsCoreRegister()));
+ } else if (dst.IsDRegister()) {
+ CHECK(src.IsDRegister()) << src;
+ __ vmovd(dst.AsDRegister(), src.AsDRegister());
+ } else if (dst.IsSRegister()) {
+ CHECK(src.IsSRegister()) << src;
+ __ vmovs(dst.AsSRegister(), src.AsSRegister());
+ } else {
+ CHECK(dst.IsRegisterPair()) << dst;
+ CHECK(src.IsRegisterPair()) << src;
+ // Ensure that the first move doesn't clobber the input of the second.
+ if (src.AsRegisterPairHigh() != dst.AsRegisterPairLow()) {
+ __ mov(dst.AsRegisterPairLow(), ShifterOperand(src.AsRegisterPairLow()));
+ __ mov(dst.AsRegisterPairHigh(), ShifterOperand(src.AsRegisterPairHigh()));
+ } else {
+ __ mov(dst.AsRegisterPairHigh(), ShifterOperand(src.AsRegisterPairHigh()));
+ __ mov(dst.AsRegisterPairLow(), ShifterOperand(src.AsRegisterPairLow()));
+ }
+ }
+ }
+}
+
+void ArmJNIMacroAssembler::Copy(FrameOffset dest,
+ FrameOffset src,
+ ManagedRegister mscratch,
+ size_t size) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ CHECK(size == 4 || size == 8) << size;
+ if (size == 4) {
+ __ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
+ __ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
+ } else if (size == 8) {
+ __ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
+ __ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
+ __ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value() + 4);
+ __ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4);
+ }
+}
+
+void ArmJNIMacroAssembler::Copy(FrameOffset dest,
+ ManagedRegister src_base,
+ Offset src_offset,
+ ManagedRegister mscratch,
+ size_t size) {
+ Register scratch = mscratch.AsArm().AsCoreRegister();
+ CHECK_EQ(size, 4u);
+ __ LoadFromOffset(kLoadWord, scratch, src_base.AsArm().AsCoreRegister(), src_offset.Int32Value());
+ __ StoreToOffset(kStoreWord, scratch, SP, dest.Int32Value());
+}
+
+void ArmJNIMacroAssembler::Copy(ManagedRegister dest_base,
+ Offset dest_offset,
+ FrameOffset src,
+ ManagedRegister mscratch,
+ size_t size) {
+ Register scratch = mscratch.AsArm().AsCoreRegister();
+ CHECK_EQ(size, 4u);
+ __ LoadFromOffset(kLoadWord, scratch, SP, src.Int32Value());
+ __ StoreToOffset(kStoreWord,
+ scratch,
+ dest_base.AsArm().AsCoreRegister(),
+ dest_offset.Int32Value());
+}
+
+void ArmJNIMacroAssembler::Copy(FrameOffset /*dst*/,
+ FrameOffset /*src_base*/,
+ Offset /*src_offset*/,
+ ManagedRegister /*mscratch*/,
+ size_t /*size*/) {
+ UNIMPLEMENTED(FATAL);
+}
+
+void ArmJNIMacroAssembler::Copy(ManagedRegister dest,
+ Offset dest_offset,
+ ManagedRegister src,
+ Offset src_offset,
+ ManagedRegister mscratch,
+ size_t size) {
+ CHECK_EQ(size, 4u);
+ Register scratch = mscratch.AsArm().AsCoreRegister();
+ __ LoadFromOffset(kLoadWord, scratch, src.AsArm().AsCoreRegister(), src_offset.Int32Value());
+ __ StoreToOffset(kStoreWord, scratch, dest.AsArm().AsCoreRegister(), dest_offset.Int32Value());
+}
+
+void ArmJNIMacroAssembler::Copy(FrameOffset /*dst*/,
+ Offset /*dest_offset*/,
+ FrameOffset /*src*/,
+ Offset /*src_offset*/,
+ ManagedRegister /*scratch*/,
+ size_t /*size*/) {
+ UNIMPLEMENTED(FATAL);
+}
+
+void ArmJNIMacroAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
+ FrameOffset handle_scope_offset,
+ ManagedRegister min_reg,
+ bool null_allowed) {
+ ArmManagedRegister out_reg = mout_reg.AsArm();
+ ArmManagedRegister in_reg = min_reg.AsArm();
+ CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg;
+ CHECK(out_reg.IsCoreRegister()) << out_reg;
+ if (null_allowed) {
+ // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
+ // the address in the handle scope holding the reference.
+ // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
+ if (in_reg.IsNoRegister()) {
+ __ LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
+ in_reg = out_reg;
+ }
+ __ cmp(in_reg.AsCoreRegister(), ShifterOperand(0));
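+    // The IT(E) block predicates what follows: out_reg is zeroed when the
+    // handle was null (EQ) and set to SP + handle_scope_offset otherwise (NE).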
+ if (!out_reg.Equals(in_reg)) {
+ __ it(EQ, kItElse);
+ __ LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
+ } else {
+ __ it(NE);
+ }
+ __ AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), NE);
+ } else {
+ __ AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), AL);
+ }
+}
+
+void ArmJNIMacroAssembler::CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handle_scope_offset,
+ ManagedRegister mscratch,
+ bool null_allowed) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ if (null_allowed) {
+ __ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
+ // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
+ // the address in the handle scope holding the reference.
+ // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
+ __ cmp(scratch.AsCoreRegister(), ShifterOperand(0));
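+    // it(NE) predicates the add, so scratch stays 0 when the loaded reference was null.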
+ __ it(NE);
+ __ AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), NE);
+ } else {
+ __ AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), AL);
+ }
+ __ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, out_off.Int32Value());
+}
+
+void ArmJNIMacroAssembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
+ ManagedRegister min_reg) {
+ ArmManagedRegister out_reg = mout_reg.AsArm();
+ ArmManagedRegister in_reg = min_reg.AsArm();
+ CHECK(out_reg.IsCoreRegister()) << out_reg;
+ CHECK(in_reg.IsCoreRegister()) << in_reg;
+  // Set the flags before the conditional instructions that consume them; on
+  // Thumb-2 each predicated instruction must also sit inside an IT block.
+  __ cmp(in_reg.AsCoreRegister(), ShifterOperand(0));
+  if (!out_reg.Equals(in_reg)) {
+    __ it(EQ, kItElse);
+    __ LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
+  } else {
+    __ it(NE);
+  }
+  __ LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(), in_reg.AsCoreRegister(), 0, NE);
+}
+
+void ArmJNIMacroAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
+ // TODO: not validating references.
+}
+
+void ArmJNIMacroAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
+ // TODO: not validating references.
+}
+
+void ArmJNIMacroAssembler::Call(ManagedRegister mbase, Offset offset,
+ ManagedRegister mscratch) {
+ ArmManagedRegister base = mbase.AsArm();
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(base.IsCoreRegister()) << base;
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ __ LoadFromOffset(kLoadWord,
+ scratch.AsCoreRegister(),
+ base.AsCoreRegister(),
+ offset.Int32Value());
+ __ blx(scratch.AsCoreRegister());
+ // TODO: place reference map on call.
+}
+
+void ArmJNIMacroAssembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ // Call *(*(SP + base) + offset)
+ __ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, base.Int32Value());
+ __ LoadFromOffset(kLoadWord,
+ scratch.AsCoreRegister(),
+ scratch.AsCoreRegister(),
+ offset.Int32Value());
+ __ blx(scratch.AsCoreRegister());
+  // TODO: place reference map on call.
+}
+
+void ArmJNIMacroAssembler::CallFromThread(ThreadOffset32 offset ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL);
+}
+
+void ArmJNIMacroAssembler::GetCurrentThread(ManagedRegister tr) {
+ __ mov(tr.AsArm().AsCoreRegister(), ShifterOperand(TR));
+}
+
+void ArmJNIMacroAssembler::GetCurrentThread(FrameOffset offset, ManagedRegister /*scratch*/) {
+ __ StoreToOffset(kStoreWord, TR, SP, offset.Int32Value(), AL);
+}
+
+void ArmJNIMacroAssembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ ArmExceptionSlowPath* slow = new (__ GetArena()) ArmExceptionSlowPath(scratch, stack_adjust);
+ __ GetBuffer()->EnqueueSlowPath(slow);
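+  // The slow path body is emitted after the main code when the buffer is finalized.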
+ __ LoadFromOffset(kLoadWord,
+ scratch.AsCoreRegister(),
+ TR,
+ Thread::ExceptionOffset<kArmPointerSize>().Int32Value());
+ __ cmp(scratch.AsCoreRegister(), ShifterOperand(0));
+ __ b(slow->Entry(), NE);
+}
+
+#undef __
+
+void ArmExceptionSlowPath::Emit(Assembler* sasm) {
+ ArmAssembler* sp_asm = down_cast<ArmAssembler*>(sasm);
+#define __ sp_asm->
+ __ Bind(&entry_);
+ if (stack_adjust_ != 0) { // Fix up the frame.
+ DecreaseFrameSizeImpl(sp_asm, stack_adjust_);
+ }
+ // Pass exception object as argument.
+ // Don't care about preserving R0 as this call won't return.
+ __ mov(R0, ShifterOperand(scratch_.AsCoreRegister()));
+ // Set up call to Thread::Current()->pDeliverException.
+ __ LoadFromOffset(kLoadWord,
+ R12,
+ TR,
+ QUICK_ENTRYPOINT_OFFSET(kArmPointerSize, pDeliverException).Int32Value());
+ __ blx(R12);
+#undef __
+}
+
+void ArmJNIMacroAssembler::MemoryBarrier(ManagedRegister mscratch) {
+ CHECK_EQ(mscratch.AsArm().AsCoreRegister(), R12);
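+  // `__` is no longer defined at this point, so call through asm_ directly;
+  // dmb(SY) is a full-system data memory barrier.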
+ asm_->dmb(SY);
+}
+
+} // namespace arm
+} // namespace art
diff --git a/compiler/utils/arm/jni_macro_assembler_arm.h b/compiler/utils/arm/jni_macro_assembler_arm.h
new file mode 100644
index 0000000..4471906
--- /dev/null
+++ b/compiler/utils/arm/jni_macro_assembler_arm.h
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_ARM_JNI_MACRO_ASSEMBLER_ARM_H_
+#define ART_COMPILER_UTILS_ARM_JNI_MACRO_ASSEMBLER_ARM_H_
+
+#include <memory>
+#include <type_traits>
+#include <vector>
+
+#include "arch/instruction_set.h"
+#include "base/enums.h"
+#include "base/macros.h"
+#include "utils/jni_macro_assembler.h"
+#include "offsets.h"
+
+namespace art {
+namespace arm {
+
+class ArmAssembler;
+
+class ArmJNIMacroAssembler : public JNIMacroAssembler<PointerSize::k32> {
+ public:
+ ArmJNIMacroAssembler(ArenaAllocator* arena, InstructionSet isa);
+ virtual ~ArmJNIMacroAssembler();
+
+ size_t CodeSize() const OVERRIDE;
+ DebugFrameOpCodeWriterForAssembler& cfi() OVERRIDE;
+ void FinalizeCode() OVERRIDE;
+ void FinalizeInstructions(const MemoryRegion& region) OVERRIDE;
+
+ //
+ // Overridden common assembler high-level functionality
+ //
+
+ // Emit code that will create an activation on the stack
+ void BuildFrame(size_t frame_size,
+ ManagedRegister method_reg,
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
+
+ // Emit code that will remove an activation from the stack
+ void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs)
+ OVERRIDE;
+
+ void IncreaseFrameSize(size_t adjust) OVERRIDE;
+ void DecreaseFrameSize(size_t adjust) OVERRIDE;
+
+ // Store routines
+ void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
+ void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
+ void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
+
+ void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
+
+ void StoreStackOffsetToThread(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister scratch) OVERRIDE;
+
+ void StoreStackPointerToThread(ThreadOffset32 thr_offs) OVERRIDE;
+
+ void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
+ ManagedRegister scratch) OVERRIDE;
+
+ // Load routines
+ void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
+
+ void LoadFromThread(ManagedRegister dest, ThreadOffset32 src, size_t size) OVERRIDE;
+
+ void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+
+ void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
+ bool unpoison_reference) OVERRIDE;
+
+ void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
+
+ void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset32 offs) OVERRIDE;
+
+ // Copying routines
+ void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
+
+ void CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset32 thr_offs,
+ ManagedRegister scratch) OVERRIDE;
+
+ void CopyRawPtrToThread(ThreadOffset32 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
+ OVERRIDE;
+
+ void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
+
+ void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
+
+ void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, ManagedRegister scratch,
+ size_t size) OVERRIDE;
+
+ void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, ManagedRegister scratch,
+ size_t size) OVERRIDE;
+
+ void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, ManagedRegister scratch,
+ size_t size) OVERRIDE;
+
+ void Copy(ManagedRegister dest, Offset dest_offset, ManagedRegister src, Offset src_offset,
+ ManagedRegister scratch, size_t size) OVERRIDE;
+
+ void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
+ ManagedRegister scratch, size_t size) OVERRIDE;
+
+ // Sign extension
+ void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+
+ // Zero extension
+ void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+
+ // Exploit fast access in managed code to Thread::Current()
+ void GetCurrentThread(ManagedRegister tr) OVERRIDE;
+ void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
+
+  // Set up out_reg to hold an Object** into the handle scope, or to be null if the
+ // value is null and null_allowed. in_reg holds a possibly stale reference
+ // that can be used to avoid loading the handle scope entry to see if the value is
+ // null.
+ void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
+ ManagedRegister in_reg, bool null_allowed) OVERRIDE;
+
+  // Set up out_off to hold an Object** into the handle scope, or to be null if the
+ // value is null and null_allowed.
+ void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
+ ManagedRegister scratch, bool null_allowed) OVERRIDE;
+
+  // src holds a handle scope entry (Object**); load this into dst.
+ void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+
+ // Heap::VerifyObject on src. In some cases (such as a reference to this) we
+ // know that src may not be null.
+ void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
+ void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
+
+ // Call to address held at [base+offset]
+ void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
+ void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
+ void CallFromThread(ThreadOffset32 offset, ManagedRegister scratch) OVERRIDE;
+
+ // Generate code to check if Thread::Current()->exception_ is non-null
+  // and branch to an ExceptionSlowPath if it is.
+ void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
+
+ void MemoryBarrier(ManagedRegister scratch) OVERRIDE;
+
+ private:
+ std::unique_ptr<ArmAssembler> asm_;
+};
+
+} // namespace arm
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_ARM_JNI_MACRO_ASSEMBLER_ARM_H_
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index d82caf5..22221e7 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -28,620 +28,49 @@
#ifdef ___
#error "ARM64 Assembler macro already defined."
#else
-#define ___ vixl_masm_->
+#define ___ vixl_masm_.
#endif
void Arm64Assembler::FinalizeCode() {
- for (const std::unique_ptr<Arm64Exception>& exception : exception_blocks_) {
- EmitExceptionPoll(exception.get());
- }
___ FinalizeCode();
}
size_t Arm64Assembler::CodeSize() const {
- return vixl_masm_->GetBufferCapacity() - vixl_masm_->GetRemainingBufferSpace();
+ return vixl_masm_.GetBufferCapacity() - vixl_masm_.GetRemainingBufferSpace();
}
const uint8_t* Arm64Assembler::CodeBufferBaseAddress() const {
- return vixl_masm_->GetStartAddress<uint8_t*>();
+ return vixl_masm_.GetStartAddress<uint8_t*>();
}
void Arm64Assembler::FinalizeInstructions(const MemoryRegion& region) {
// Copy the instructions from the buffer.
- MemoryRegion from(vixl_masm_->GetStartAddress<void*>(), CodeSize());
+ MemoryRegion from(vixl_masm_.GetStartAddress<void*>(), CodeSize());
region.CopyFrom(0, from);
}
-void Arm64Assembler::GetCurrentThread(ManagedRegister tr) {
- ___ Mov(reg_x(tr.AsArm64().AsXRegister()), reg_x(TR));
-}
-
-void Arm64Assembler::GetCurrentThread(FrameOffset offset, ManagedRegister /* scratch */) {
- StoreToOffset(TR, SP, offset.Int32Value());
-}
-
-// See Arm64 PCS Section 5.2.2.1.
-void Arm64Assembler::IncreaseFrameSize(size_t adjust) {
- CHECK_ALIGNED(adjust, kStackAlignment);
- AddConstant(SP, -adjust);
- cfi().AdjustCFAOffset(adjust);
-}
-
-// See Arm64 PCS Section 5.2.2.1.
-void Arm64Assembler::DecreaseFrameSize(size_t adjust) {
- CHECK_ALIGNED(adjust, kStackAlignment);
- AddConstant(SP, adjust);
- cfi().AdjustCFAOffset(-adjust);
-}
-
-void Arm64Assembler::AddConstant(XRegister rd, int32_t value, Condition cond) {
- AddConstant(rd, rd, value, cond);
-}
-
-void Arm64Assembler::AddConstant(XRegister rd, XRegister rn, int32_t value,
- Condition cond) {
- if ((cond == al) || (cond == nv)) {
- // VIXL macro-assembler handles all variants.
- ___ Add(reg_x(rd), reg_x(rn), value);
- } else {
- // temp = rd + value
- // rd = cond ? temp : rn
- UseScratchRegisterScope temps(vixl_masm_);
- temps.Exclude(reg_x(rd), reg_x(rn));
- Register temp = temps.AcquireX();
- ___ Add(temp, reg_x(rn), value);
- ___ Csel(reg_x(rd), temp, reg_x(rd), cond);
- }
-}
-
-void Arm64Assembler::StoreWToOffset(StoreOperandType type, WRegister source,
- XRegister base, int32_t offset) {
- switch (type) {
- case kStoreByte:
- ___ Strb(reg_w(source), MEM_OP(reg_x(base), offset));
- break;
- case kStoreHalfword:
- ___ Strh(reg_w(source), MEM_OP(reg_x(base), offset));
- break;
- case kStoreWord:
- ___ Str(reg_w(source), MEM_OP(reg_x(base), offset));
- break;
- default:
- LOG(FATAL) << "UNREACHABLE";
- }
-}
-
-void Arm64Assembler::StoreToOffset(XRegister source, XRegister base, int32_t offset) {
- CHECK_NE(source, SP);
- ___ Str(reg_x(source), MEM_OP(reg_x(base), offset));
-}
-
-void Arm64Assembler::StoreSToOffset(SRegister source, XRegister base, int32_t offset) {
- ___ Str(reg_s(source), MEM_OP(reg_x(base), offset));
-}
-
-void Arm64Assembler::StoreDToOffset(DRegister source, XRegister base, int32_t offset) {
- ___ Str(reg_d(source), MEM_OP(reg_x(base), offset));
-}
-
-void Arm64Assembler::Store(FrameOffset offs, ManagedRegister m_src, size_t size) {
- Arm64ManagedRegister src = m_src.AsArm64();
- if (src.IsNoRegister()) {
- CHECK_EQ(0u, size);
- } else if (src.IsWRegister()) {
- CHECK_EQ(4u, size);
- StoreWToOffset(kStoreWord, src.AsWRegister(), SP, offs.Int32Value());
- } else if (src.IsXRegister()) {
- CHECK_EQ(8u, size);
- StoreToOffset(src.AsXRegister(), SP, offs.Int32Value());
- } else if (src.IsSRegister()) {
- StoreSToOffset(src.AsSRegister(), SP, offs.Int32Value());
- } else {
- CHECK(src.IsDRegister()) << src;
- StoreDToOffset(src.AsDRegister(), SP, offs.Int32Value());
- }
-}
-
-void Arm64Assembler::StoreRef(FrameOffset offs, ManagedRegister m_src) {
- Arm64ManagedRegister src = m_src.AsArm64();
- CHECK(src.IsXRegister()) << src;
- StoreWToOffset(kStoreWord, src.AsOverlappingWRegister(), SP,
- offs.Int32Value());
-}
-
-void Arm64Assembler::StoreRawPtr(FrameOffset offs, ManagedRegister m_src) {
- Arm64ManagedRegister src = m_src.AsArm64();
- CHECK(src.IsXRegister()) << src;
- StoreToOffset(src.AsXRegister(), SP, offs.Int32Value());
-}
-
-void Arm64Assembler::StoreImmediateToFrame(FrameOffset offs, uint32_t imm,
- ManagedRegister m_scratch) {
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- CHECK(scratch.IsXRegister()) << scratch;
- LoadImmediate(scratch.AsXRegister(), imm);
- StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), SP,
- offs.Int32Value());
-}
-
-void Arm64Assembler::StoreImmediateToThread64(ThreadOffset64 offs,
- uint32_t imm,
- ManagedRegister m_scratch) {
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- CHECK(scratch.IsXRegister()) << scratch;
- LoadImmediate(scratch.AsXRegister(), imm);
- StoreToOffset(scratch.AsXRegister(), TR, offs.Int32Value());
-}
-
-void Arm64Assembler::StoreStackOffsetToThread64(ThreadOffset64 tr_offs,
- FrameOffset fr_offs,
- ManagedRegister m_scratch) {
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- CHECK(scratch.IsXRegister()) << scratch;
- AddConstant(scratch.AsXRegister(), SP, fr_offs.Int32Value());
- StoreToOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
-}
-
-void Arm64Assembler::StoreStackPointerToThread64(ThreadOffset64 tr_offs) {
- UseScratchRegisterScope temps(vixl_masm_);
- Register temp = temps.AcquireX();
- ___ Mov(temp, reg_x(SP));
- ___ Str(temp, MEM_OP(reg_x(TR), tr_offs.Int32Value()));
-}
-
-void Arm64Assembler::StoreSpanning(FrameOffset dest_off, ManagedRegister m_source,
- FrameOffset in_off, ManagedRegister m_scratch) {
- Arm64ManagedRegister source = m_source.AsArm64();
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- StoreToOffset(source.AsXRegister(), SP, dest_off.Int32Value());
- LoadFromOffset(scratch.AsXRegister(), SP, in_off.Int32Value());
- StoreToOffset(scratch.AsXRegister(), SP, dest_off.Int32Value() + 8);
-}
-
-// Load routines.
-void Arm64Assembler::LoadImmediate(XRegister dest, int32_t value,
- Condition cond) {
- if ((cond == al) || (cond == nv)) {
- ___ Mov(reg_x(dest), value);
- } else {
- // temp = value
- // rd = cond ? temp : rd
- if (value != 0) {
- UseScratchRegisterScope temps(vixl_masm_);
- temps.Exclude(reg_x(dest));
- Register temp = temps.AcquireX();
- ___ Mov(temp, value);
- ___ Csel(reg_x(dest), temp, reg_x(dest), cond);
- } else {
- ___ Csel(reg_x(dest), reg_x(XZR), reg_x(dest), cond);
- }
- }
-}
-
-void Arm64Assembler::LoadWFromOffset(LoadOperandType type, WRegister dest,
- XRegister base, int32_t offset) {
- switch (type) {
- case kLoadSignedByte:
- ___ Ldrsb(reg_w(dest), MEM_OP(reg_x(base), offset));
- break;
- case kLoadSignedHalfword:
- ___ Ldrsh(reg_w(dest), MEM_OP(reg_x(base), offset));
- break;
- case kLoadUnsignedByte:
- ___ Ldrb(reg_w(dest), MEM_OP(reg_x(base), offset));
- break;
- case kLoadUnsignedHalfword:
- ___ Ldrh(reg_w(dest), MEM_OP(reg_x(base), offset));
- break;
- case kLoadWord:
- ___ Ldr(reg_w(dest), MEM_OP(reg_x(base), offset));
- break;
- default:
- LOG(FATAL) << "UNREACHABLE";
- }
-}
-
-// Note: We can extend this member by adding load type info - see
-// sign extended A64 load variants.
-void Arm64Assembler::LoadFromOffset(XRegister dest, XRegister base,
- int32_t offset) {
- CHECK_NE(dest, SP);
- ___ Ldr(reg_x(dest), MEM_OP(reg_x(base), offset));
-}
-
-void Arm64Assembler::LoadSFromOffset(SRegister dest, XRegister base,
- int32_t offset) {
- ___ Ldr(reg_s(dest), MEM_OP(reg_x(base), offset));
-}
-
-void Arm64Assembler::LoadDFromOffset(DRegister dest, XRegister base,
- int32_t offset) {
- ___ Ldr(reg_d(dest), MEM_OP(reg_x(base), offset));
-}
-
-void Arm64Assembler::Load(Arm64ManagedRegister dest, XRegister base,
- int32_t offset, size_t size) {
- if (dest.IsNoRegister()) {
- CHECK_EQ(0u, size) << dest;
- } else if (dest.IsWRegister()) {
- CHECK_EQ(4u, size) << dest;
- ___ Ldr(reg_w(dest.AsWRegister()), MEM_OP(reg_x(base), offset));
- } else if (dest.IsXRegister()) {
- CHECK_NE(dest.AsXRegister(), SP) << dest;
- if (size == 4u) {
- ___ Ldr(reg_w(dest.AsOverlappingWRegister()), MEM_OP(reg_x(base), offset));
- } else {
- CHECK_EQ(8u, size) << dest;
- ___ Ldr(reg_x(dest.AsXRegister()), MEM_OP(reg_x(base), offset));
- }
- } else if (dest.IsSRegister()) {
- ___ Ldr(reg_s(dest.AsSRegister()), MEM_OP(reg_x(base), offset));
- } else {
- CHECK(dest.IsDRegister()) << dest;
- ___ Ldr(reg_d(dest.AsDRegister()), MEM_OP(reg_x(base), offset));
- }
-}
-
-void Arm64Assembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) {
- return Load(m_dst.AsArm64(), SP, src.Int32Value(), size);
-}
-
-void Arm64Assembler::LoadFromThread64(ManagedRegister m_dst, ThreadOffset64 src, size_t size) {
- return Load(m_dst.AsArm64(), TR, src.Int32Value(), size);
-}
-
-void Arm64Assembler::LoadRef(ManagedRegister m_dst, FrameOffset offs) {
- Arm64ManagedRegister dst = m_dst.AsArm64();
- CHECK(dst.IsXRegister()) << dst;
- LoadWFromOffset(kLoadWord, dst.AsOverlappingWRegister(), SP, offs.Int32Value());
-}
-
-void Arm64Assembler::LoadRef(ManagedRegister m_dst, ManagedRegister m_base, MemberOffset offs,
- bool unpoison_reference) {
- Arm64ManagedRegister dst = m_dst.AsArm64();
- Arm64ManagedRegister base = m_base.AsArm64();
- CHECK(dst.IsXRegister() && base.IsXRegister());
- LoadWFromOffset(kLoadWord, dst.AsOverlappingWRegister(), base.AsXRegister(),
- offs.Int32Value());
- if (unpoison_reference) {
- WRegister ref_reg = dst.AsOverlappingWRegister();
- MaybeUnpoisonHeapReference(reg_w(ref_reg));
- }
-}
-
void Arm64Assembler::LoadRawPtr(ManagedRegister m_dst, ManagedRegister m_base, Offset offs) {
Arm64ManagedRegister dst = m_dst.AsArm64();
Arm64ManagedRegister base = m_base.AsArm64();
CHECK(dst.IsXRegister() && base.IsXRegister());
// Remove dst and base from the temp list - higher level API uses IP1, IP0.
- UseScratchRegisterScope temps(vixl_masm_);
+ UseScratchRegisterScope temps(&vixl_masm_);
temps.Exclude(reg_x(dst.AsXRegister()), reg_x(base.AsXRegister()));
___ Ldr(reg_x(dst.AsXRegister()), MEM_OP(reg_x(base.AsXRegister()), offs.Int32Value()));
}
-void Arm64Assembler::LoadRawPtrFromThread64(ManagedRegister m_dst, ThreadOffset64 offs) {
- Arm64ManagedRegister dst = m_dst.AsArm64();
- CHECK(dst.IsXRegister()) << dst;
- LoadFromOffset(dst.AsXRegister(), TR, offs.Int32Value());
-}
-
-// Copying routines.
-void Arm64Assembler::Move(ManagedRegister m_dst, ManagedRegister m_src, size_t size) {
- Arm64ManagedRegister dst = m_dst.AsArm64();
- Arm64ManagedRegister src = m_src.AsArm64();
- if (!dst.Equals(src)) {
- if (dst.IsXRegister()) {
- if (size == 4) {
- CHECK(src.IsWRegister());
- ___ Mov(reg_w(dst.AsOverlappingWRegister()), reg_w(src.AsWRegister()));
- } else {
- if (src.IsXRegister()) {
- ___ Mov(reg_x(dst.AsXRegister()), reg_x(src.AsXRegister()));
- } else {
- ___ Mov(reg_x(dst.AsXRegister()), reg_x(src.AsOverlappingXRegister()));
- }
- }
- } else if (dst.IsWRegister()) {
- CHECK(src.IsWRegister()) << src;
- ___ Mov(reg_w(dst.AsWRegister()), reg_w(src.AsWRegister()));
- } else if (dst.IsSRegister()) {
- CHECK(src.IsSRegister()) << src;
- ___ Fmov(reg_s(dst.AsSRegister()), reg_s(src.AsSRegister()));
- } else {
- CHECK(dst.IsDRegister()) << dst;
- CHECK(src.IsDRegister()) << src;
- ___ Fmov(reg_d(dst.AsDRegister()), reg_d(src.AsDRegister()));
- }
- }
-}
-
-void Arm64Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs,
- ThreadOffset64 tr_offs,
- ManagedRegister m_scratch) {
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- CHECK(scratch.IsXRegister()) << scratch;
- LoadFromOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
- StoreToOffset(scratch.AsXRegister(), SP, fr_offs.Int32Value());
-}
-
-void Arm64Assembler::CopyRawPtrToThread64(ThreadOffset64 tr_offs,
- FrameOffset fr_offs,
- ManagedRegister m_scratch) {
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- CHECK(scratch.IsXRegister()) << scratch;
- LoadFromOffset(scratch.AsXRegister(), SP, fr_offs.Int32Value());
- StoreToOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
-}
-
-void Arm64Assembler::CopyRef(FrameOffset dest, FrameOffset src,
- ManagedRegister m_scratch) {
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- CHECK(scratch.IsXRegister()) << scratch;
- LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(),
- SP, src.Int32Value());
- StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(),
- SP, dest.Int32Value());
-}
-
-void Arm64Assembler::Copy(FrameOffset dest, FrameOffset src,
- ManagedRegister m_scratch, size_t size) {
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- CHECK(scratch.IsXRegister()) << scratch;
- CHECK(size == 4 || size == 8) << size;
- if (size == 4) {
- LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), SP, src.Int32Value());
- StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), SP, dest.Int32Value());
- } else if (size == 8) {
- LoadFromOffset(scratch.AsXRegister(), SP, src.Int32Value());
- StoreToOffset(scratch.AsXRegister(), SP, dest.Int32Value());
- } else {
- UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
- }
-}
-
-void Arm64Assembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
- ManagedRegister m_scratch, size_t size) {
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- Arm64ManagedRegister base = src_base.AsArm64();
- CHECK(base.IsXRegister()) << base;
- CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch;
- CHECK(size == 4 || size == 8) << size;
- if (size == 4) {
- LoadWFromOffset(kLoadWord, scratch.AsWRegister(), base.AsXRegister(),
- src_offset.Int32Value());
- StoreWToOffset(kStoreWord, scratch.AsWRegister(), SP, dest.Int32Value());
- } else if (size == 8) {
- LoadFromOffset(scratch.AsXRegister(), base.AsXRegister(), src_offset.Int32Value());
- StoreToOffset(scratch.AsXRegister(), SP, dest.Int32Value());
- } else {
- UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
- }
-}
-
-void Arm64Assembler::Copy(ManagedRegister m_dest_base, Offset dest_offs, FrameOffset src,
- ManagedRegister m_scratch, size_t size) {
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- Arm64ManagedRegister base = m_dest_base.AsArm64();
- CHECK(base.IsXRegister()) << base;
- CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch;
- CHECK(size == 4 || size == 8) << size;
- if (size == 4) {
- LoadWFromOffset(kLoadWord, scratch.AsWRegister(), SP, src.Int32Value());
- StoreWToOffset(kStoreWord, scratch.AsWRegister(), base.AsXRegister(),
- dest_offs.Int32Value());
- } else if (size == 8) {
- LoadFromOffset(scratch.AsXRegister(), SP, src.Int32Value());
- StoreToOffset(scratch.AsXRegister(), base.AsXRegister(), dest_offs.Int32Value());
- } else {
- UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
- }
-}
-
-void Arm64Assembler::Copy(FrameOffset /*dst*/, FrameOffset /*src_base*/, Offset /*src_offset*/,
- ManagedRegister /*mscratch*/, size_t /*size*/) {
- UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant";
-}
-
-void Arm64Assembler::Copy(ManagedRegister m_dest, Offset dest_offset,
- ManagedRegister m_src, Offset src_offset,
- ManagedRegister m_scratch, size_t size) {
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- Arm64ManagedRegister src = m_src.AsArm64();
- Arm64ManagedRegister dest = m_dest.AsArm64();
- CHECK(dest.IsXRegister()) << dest;
- CHECK(src.IsXRegister()) << src;
- CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch;
- CHECK(size == 4 || size == 8) << size;
- if (size == 4) {
- if (scratch.IsWRegister()) {
- LoadWFromOffset(kLoadWord, scratch.AsWRegister(), src.AsXRegister(),
- src_offset.Int32Value());
- StoreWToOffset(kStoreWord, scratch.AsWRegister(), dest.AsXRegister(),
- dest_offset.Int32Value());
- } else {
- LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), src.AsXRegister(),
- src_offset.Int32Value());
- StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), dest.AsXRegister(),
- dest_offset.Int32Value());
- }
- } else if (size == 8) {
- LoadFromOffset(scratch.AsXRegister(), src.AsXRegister(), src_offset.Int32Value());
- StoreToOffset(scratch.AsXRegister(), dest.AsXRegister(), dest_offset.Int32Value());
- } else {
- UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
- }
-}
-
-void Arm64Assembler::Copy(FrameOffset /*dst*/, Offset /*dest_offset*/,
- FrameOffset /*src*/, Offset /*src_offset*/,
- ManagedRegister /*scratch*/, size_t /*size*/) {
- UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant";
-}
-
-void Arm64Assembler::MemoryBarrier(ManagedRegister m_scratch ATTRIBUTE_UNUSED) {
- // TODO: Should we check that m_scratch is IP? - see arm.
- ___ Dmb(InnerShareable, BarrierAll);
-}
-
-void Arm64Assembler::SignExtend(ManagedRegister mreg, size_t size) {
- Arm64ManagedRegister reg = mreg.AsArm64();
- CHECK(size == 1 || size == 2) << size;
- CHECK(reg.IsWRegister()) << reg;
- if (size == 1) {
- ___ Sxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
- } else {
- ___ Sxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
- }
-}
-
-void Arm64Assembler::ZeroExtend(ManagedRegister mreg, size_t size) {
- Arm64ManagedRegister reg = mreg.AsArm64();
- CHECK(size == 1 || size == 2) << size;
- CHECK(reg.IsWRegister()) << reg;
- if (size == 1) {
- ___ Uxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
- } else {
- ___ Uxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
- }
-}
-
-void Arm64Assembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
- // TODO: not validating references.
-}
-
-void Arm64Assembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
- // TODO: not validating references.
-}
-
-void Arm64Assembler::Call(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch) {
- Arm64ManagedRegister base = m_base.AsArm64();
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- CHECK(base.IsXRegister()) << base;
- CHECK(scratch.IsXRegister()) << scratch;
- LoadFromOffset(scratch.AsXRegister(), base.AsXRegister(), offs.Int32Value());
- ___ Blr(reg_x(scratch.AsXRegister()));
-}
-
void Arm64Assembler::JumpTo(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch) {
Arm64ManagedRegister base = m_base.AsArm64();
Arm64ManagedRegister scratch = m_scratch.AsArm64();
CHECK(base.IsXRegister()) << base;
CHECK(scratch.IsXRegister()) << scratch;
// Remove base and scratch from the temp list - higher level API uses IP1, IP0.
- UseScratchRegisterScope temps(vixl_masm_);
+ UseScratchRegisterScope temps(&vixl_masm_);
temps.Exclude(reg_x(base.AsXRegister()), reg_x(scratch.AsXRegister()));
___ Ldr(reg_x(scratch.AsXRegister()), MEM_OP(reg_x(base.AsXRegister()), offs.Int32Value()));
___ Br(reg_x(scratch.AsXRegister()));
}
-void Arm64Assembler::Call(FrameOffset base, Offset offs, ManagedRegister m_scratch) {
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- CHECK(scratch.IsXRegister()) << scratch;
- // Call *(*(SP + base) + offset)
- LoadFromOffset(scratch.AsXRegister(), SP, base.Int32Value());
- LoadFromOffset(scratch.AsXRegister(), scratch.AsXRegister(), offs.Int32Value());
- ___ Blr(reg_x(scratch.AsXRegister()));
-}
-
-void Arm64Assembler::CallFromThread64(ThreadOffset64 offset ATTRIBUTE_UNUSED,
- ManagedRegister scratch ATTRIBUTE_UNUSED) {
- UNIMPLEMENTED(FATAL) << "Unimplemented Call() variant";
-}
-
-void Arm64Assembler::CreateHandleScopeEntry(
- ManagedRegister m_out_reg, FrameOffset handle_scope_offs, ManagedRegister m_in_reg,
- bool null_allowed) {
- Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
- Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
- // For now we only hold stale handle scope entries in x registers.
- CHECK(in_reg.IsNoRegister() || in_reg.IsXRegister()) << in_reg;
- CHECK(out_reg.IsXRegister()) << out_reg;
- if (null_allowed) {
- // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
- // the address in the handle scope holding the reference.
- // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
- if (in_reg.IsNoRegister()) {
- LoadWFromOffset(kLoadWord, out_reg.AsOverlappingWRegister(), SP,
- handle_scope_offs.Int32Value());
- in_reg = out_reg;
- }
- ___ Cmp(reg_w(in_reg.AsOverlappingWRegister()), 0);
- if (!out_reg.Equals(in_reg)) {
- LoadImmediate(out_reg.AsXRegister(), 0, eq);
- }
- AddConstant(out_reg.AsXRegister(), SP, handle_scope_offs.Int32Value(), ne);
- } else {
- AddConstant(out_reg.AsXRegister(), SP, handle_scope_offs.Int32Value(), al);
- }
-}
-
-void Arm64Assembler::CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handle_scope_offset,
- ManagedRegister m_scratch, bool null_allowed) {
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- CHECK(scratch.IsXRegister()) << scratch;
- if (null_allowed) {
- LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), SP,
- handle_scope_offset.Int32Value());
- // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
- // the address in the handle scope holding the reference.
- // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
- ___ Cmp(reg_w(scratch.AsOverlappingWRegister()), 0);
- // Move this logic in add constants with flags.
- AddConstant(scratch.AsXRegister(), SP, handle_scope_offset.Int32Value(), ne);
- } else {
- AddConstant(scratch.AsXRegister(), SP, handle_scope_offset.Int32Value(), al);
- }
- StoreToOffset(scratch.AsXRegister(), SP, out_off.Int32Value());
-}
-
-void Arm64Assembler::LoadReferenceFromHandleScope(ManagedRegister m_out_reg,
- ManagedRegister m_in_reg) {
- Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
- Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
- CHECK(out_reg.IsXRegister()) << out_reg;
- CHECK(in_reg.IsXRegister()) << in_reg;
- vixl::aarch64::Label exit;
- if (!out_reg.Equals(in_reg)) {
- // FIXME: Who sets the flags here?
- LoadImmediate(out_reg.AsXRegister(), 0, eq);
- }
- ___ Cbz(reg_x(in_reg.AsXRegister()), &exit);
- LoadFromOffset(out_reg.AsXRegister(), in_reg.AsXRegister(), 0);
- ___ Bind(&exit);
-}
-
-void Arm64Assembler::ExceptionPoll(ManagedRegister m_scratch, size_t stack_adjust) {
- CHECK_ALIGNED(stack_adjust, kStackAlignment);
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- exception_blocks_.emplace_back(new Arm64Exception(scratch, stack_adjust));
- LoadFromOffset(scratch.AsXRegister(),
- TR,
- Thread::ExceptionOffset<kArm64PointerSize>().Int32Value());
- ___ Cbnz(reg_x(scratch.AsXRegister()), exception_blocks_.back()->Entry());
-}
-
-void Arm64Assembler::EmitExceptionPoll(Arm64Exception *exception) {
- UseScratchRegisterScope temps(vixl_masm_);
- temps.Exclude(reg_x(exception->scratch_.AsXRegister()));
- Register temp = temps.AcquireX();
-
- // Bind exception poll entry.
- ___ Bind(exception->Entry());
- if (exception->stack_adjust_ != 0) { // Fix up the frame.
- DecreaseFrameSize(exception->stack_adjust_);
- }
- // Pass exception object as argument.
- // Don't care about preserving X0 as this won't return.
- ___ Mov(reg_x(X0), reg_x(exception->scratch_.AsXRegister()));
- ___ Ldr(temp,
- MEM_OP(reg_x(TR),
- QUICK_ENTRYPOINT_OFFSET(kArm64PointerSize, pDeliverException).Int32Value()));
-
- ___ Blr(temp);
- // Call should never return.
- ___ Brk();
-}
-
static inline dwarf::Reg DWARFReg(CPURegister reg) {
if (reg.IsFPRegister()) {
return dwarf::Reg::Arm64Fp(reg.GetCode());
@@ -653,7 +82,7 @@
void Arm64Assembler::SpillRegisters(CPURegList registers, int offset) {
int size = registers.GetRegisterSizeInBytes();
- const Register sp = vixl_masm_->StackPointer();
+ const Register sp = vixl_masm_.StackPointer();
// Since we are operating on register pairs, we would like to align on
// double the standard size; on the other hand, we don't want to insert
// an extra store, which will happen if the number of registers is even.
@@ -681,7 +110,7 @@
void Arm64Assembler::UnspillRegisters(CPURegList registers, int offset) {
int size = registers.GetRegisterSizeInBytes();
- const Register sp = vixl_masm_->StackPointer();
+ const Register sp = vixl_masm_.StackPointer();
// Be consistent with the logic for spilling registers.
if (!IsAlignedParam(offset, 2 * size) && registers.GetCount() % 2 != 0) {
const CPURegister& dst0 = registers.PopLowestIndex();
@@ -705,105 +134,6 @@
DCHECK(registers.IsEmpty());
}
-void Arm64Assembler::BuildFrame(size_t frame_size,
- ManagedRegister method_reg,
- ArrayRef<const ManagedRegister> callee_save_regs,
- const ManagedRegisterEntrySpills& entry_spills) {
- // Setup VIXL CPURegList for callee-saves.
- CPURegList core_reg_list(CPURegister::kRegister, kXRegSize, 0);
- CPURegList fp_reg_list(CPURegister::kFPRegister, kDRegSize, 0);
- for (auto r : callee_save_regs) {
- Arm64ManagedRegister reg = r.AsArm64();
- if (reg.IsXRegister()) {
- core_reg_list.Combine(reg_x(reg.AsXRegister()).GetCode());
- } else {
- DCHECK(reg.IsDRegister());
- fp_reg_list.Combine(reg_d(reg.AsDRegister()).GetCode());
- }
- }
- size_t core_reg_size = core_reg_list.GetTotalSizeInBytes();
- size_t fp_reg_size = fp_reg_list.GetTotalSizeInBytes();
-
- // Increase frame to required size.
- DCHECK_ALIGNED(frame_size, kStackAlignment);
- DCHECK_GE(frame_size, core_reg_size + fp_reg_size + static_cast<size_t>(kArm64PointerSize));
- IncreaseFrameSize(frame_size);
-
- // Save callee-saves.
- SpillRegisters(core_reg_list, frame_size - core_reg_size);
- SpillRegisters(fp_reg_list, frame_size - core_reg_size - fp_reg_size);
-
- DCHECK(core_reg_list.IncludesAliasOf(reg_x(TR)));
-
- // Write ArtMethod*
- DCHECK(X0 == method_reg.AsArm64().AsXRegister());
- StoreToOffset(X0, SP, 0);
-
- // Write out entry spills
- int32_t offset = frame_size + static_cast<size_t>(kArm64PointerSize);
- for (size_t i = 0; i < entry_spills.size(); ++i) {
- Arm64ManagedRegister reg = entry_spills.at(i).AsArm64();
- if (reg.IsNoRegister()) {
- // only increment stack offset.
- ManagedRegisterSpill spill = entry_spills.at(i);
- offset += spill.getSize();
- } else if (reg.IsXRegister()) {
- StoreToOffset(reg.AsXRegister(), SP, offset);
- offset += 8;
- } else if (reg.IsWRegister()) {
- StoreWToOffset(kStoreWord, reg.AsWRegister(), SP, offset);
- offset += 4;
- } else if (reg.IsDRegister()) {
- StoreDToOffset(reg.AsDRegister(), SP, offset);
- offset += 8;
- } else if (reg.IsSRegister()) {
- StoreSToOffset(reg.AsSRegister(), SP, offset);
- offset += 4;
- }
- }
-}
-
-void Arm64Assembler::RemoveFrame(size_t frame_size,
- ArrayRef<const ManagedRegister> callee_save_regs) {
- // Setup VIXL CPURegList for callee-saves.
- CPURegList core_reg_list(CPURegister::kRegister, kXRegSize, 0);
- CPURegList fp_reg_list(CPURegister::kFPRegister, kDRegSize, 0);
- for (auto r : callee_save_regs) {
- Arm64ManagedRegister reg = r.AsArm64();
- if (reg.IsXRegister()) {
- core_reg_list.Combine(reg_x(reg.AsXRegister()).GetCode());
- } else {
- DCHECK(reg.IsDRegister());
- fp_reg_list.Combine(reg_d(reg.AsDRegister()).GetCode());
- }
- }
- size_t core_reg_size = core_reg_list.GetTotalSizeInBytes();
- size_t fp_reg_size = fp_reg_list.GetTotalSizeInBytes();
-
- // For now we only check that the size of the frame is large enough to hold spills and method
- // reference.
- DCHECK_GE(frame_size, core_reg_size + fp_reg_size + static_cast<size_t>(kArm64PointerSize));
- DCHECK_ALIGNED(frame_size, kStackAlignment);
-
- DCHECK(core_reg_list.IncludesAliasOf(reg_x(TR)));
-
- cfi_.RememberState();
-
- // Restore callee-saves.
- UnspillRegisters(core_reg_list, frame_size - core_reg_size);
- UnspillRegisters(fp_reg_list, frame_size - core_reg_size - fp_reg_size);
-
- // Decrease frame size to start of callee saved regs.
- DecreaseFrameSize(frame_size);
-
- // Pop callee saved and return to LR.
- ___ Ret();
-
- // The CFI should be restored for any code that follows the exit block.
- cfi_.RestoreState();
- cfi_.DefCFAOffset(frame_size);
-}
-
void Arm64Assembler::PoisonHeapReference(Register reg) {
DCHECK(reg.IsW());
// reg = -reg.
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index 24b7982..4e88e64 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -23,7 +23,6 @@
#include "base/arena_containers.h"
#include "base/logging.h"
-#include "constants_arm64.h"
#include "utils/arm64/managed_register_arm64.h"
#include "utils/assembler.h"
#include "offsets.h"
@@ -62,38 +61,13 @@
kStoreDWord
};
-class Arm64Exception {
- private:
- Arm64Exception(Arm64ManagedRegister scratch, size_t stack_adjust)
- : scratch_(scratch), stack_adjust_(stack_adjust) {
- }
-
- vixl::aarch64::Label* Entry() { return &exception_entry_; }
-
- // Register used for passing Thread::Current()->exception_ .
- const Arm64ManagedRegister scratch_;
-
- // Stack adjust for ExceptionPool.
- const size_t stack_adjust_;
-
- vixl::aarch64::Label exception_entry_;
-
- friend class Arm64Assembler;
- DISALLOW_COPY_AND_ASSIGN(Arm64Exception);
-};
-
class Arm64Assembler FINAL : public Assembler {
public:
- // We indicate the size of the initial code generation buffer to the VIXL
- // assembler. From there we it will automatically manage the buffer.
- explicit Arm64Assembler(ArenaAllocator* arena)
- : Assembler(arena),
- exception_blocks_(arena->Adapter(kArenaAllocAssembler)),
- vixl_masm_(new vixl::aarch64::MacroAssembler(kArm64BaseBufferSize)) {}
+ explicit Arm64Assembler(ArenaAllocator* arena) : Assembler(arena) {}
- virtual ~Arm64Assembler() {
- delete vixl_masm_;
- }
+ virtual ~Arm64Assembler() {}
+
+ vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return &vixl_masm_; }
// Finalize the code.
void FinalizeCode() OVERRIDE;
@@ -105,110 +79,14 @@
// Copy instructions out of assembly buffer into the given region of memory.
void FinalizeInstructions(const MemoryRegion& region);
+ void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs);
+
void SpillRegisters(vixl::aarch64::CPURegList registers, int offset);
void UnspillRegisters(vixl::aarch64::CPURegList registers, int offset);
- // Emit code that will create an activation on the stack.
- void BuildFrame(size_t frame_size,
- ManagedRegister method_reg,
- ArrayRef<const ManagedRegister> callee_save_regs,
- const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
-
- // Emit code that will remove an activation from the stack.
- void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs)
- OVERRIDE;
-
- void IncreaseFrameSize(size_t adjust) OVERRIDE;
- void DecreaseFrameSize(size_t adjust) OVERRIDE;
-
- // Store routines.
- void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
- void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
- void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
- void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
- void StoreImmediateToThread64(ThreadOffset64 dest, uint32_t imm, ManagedRegister scratch)
- OVERRIDE;
- void StoreStackOffsetToThread64(ThreadOffset64 thr_offs, FrameOffset fr_offs,
- ManagedRegister scratch) OVERRIDE;
- void StoreStackPointerToThread64(ThreadOffset64 thr_offs) OVERRIDE;
- void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
- ManagedRegister scratch) OVERRIDE;
-
- // Load routines.
- void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
- void LoadFromThread64(ManagedRegister dest, ThreadOffset64 src, size_t size) OVERRIDE;
- void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
- void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
- bool unpoison_reference) OVERRIDE;
- void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
- void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset64 offs) OVERRIDE;
-
- // Copying routines.
- void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
- void CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset64 thr_offs,
- ManagedRegister scratch) OVERRIDE;
- void CopyRawPtrToThread64(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
- OVERRIDE;
- void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
- void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
- void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, ManagedRegister scratch,
- size_t size) OVERRIDE;
- void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, ManagedRegister scratch,
- size_t size) OVERRIDE;
- void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, ManagedRegister scratch,
- size_t size) OVERRIDE;
- void Copy(ManagedRegister dest, Offset dest_offset, ManagedRegister src, Offset src_offset,
- ManagedRegister scratch, size_t size) OVERRIDE;
- void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
- ManagedRegister scratch, size_t size) OVERRIDE;
- void MemoryBarrier(ManagedRegister scratch) OVERRIDE;
-
- // Sign extension.
- void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
-
- // Zero extension.
- void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
-
- // Exploit fast access in managed code to Thread::Current().
- void GetCurrentThread(ManagedRegister tr) OVERRIDE;
- void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
-
- // Set up out_reg to hold a Object** into the handle scope, or to be null if the
- // value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the handle scope entry to see if the value is
- // null.
- void CreateHandleScopeEntry(ManagedRegister out_reg,
- FrameOffset handlescope_offset,
- ManagedRegister in_reg,
- bool null_allowed) OVERRIDE;
-
- // Set up out_off to hold a Object** into the handle scope, or to be null if the
- // value is null and null_allowed.
- void CreateHandleScopeEntry(FrameOffset out_off,
- FrameOffset handlescope_offset,
- ManagedRegister scratch,
- bool null_allowed) OVERRIDE;
-
- // src holds a handle scope entry (Object**) load this into dst.
- void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
-
- // Heap::VerifyObject on src. In some cases (such as a reference to this) we
- // know that src may not be null.
- void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
- void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
-
- // Call to address held at [base+offset].
- void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void CallFromThread64(ThreadOffset64 offset, ManagedRegister scratch) OVERRIDE;
-
// Jump to address (not setting link register)
void JumpTo(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch);
- // Generate code to check if Thread::Current()->exception_ is non-null
- // and branch to a ExceptionSlowPath if it is.
- void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
-
//
// Heap poisoning.
//
@@ -227,7 +105,6 @@
UNIMPLEMENTED(FATAL) << "Do not use Jump for ARM64";
}
- private:
static vixl::aarch64::Register reg_x(int code) {
CHECK(code < kNumberOfXRegisters) << code;
if (code == SP) {
@@ -256,40 +133,9 @@
return vixl::aarch64::FPRegister::GetSRegFromCode(code);
}
- // Emits Exception block.
- void EmitExceptionPoll(Arm64Exception *exception);
-
- void StoreWToOffset(StoreOperandType type, WRegister source,
- XRegister base, int32_t offset);
- void StoreToOffset(XRegister source, XRegister base, int32_t offset);
- void StoreSToOffset(SRegister source, XRegister base, int32_t offset);
- void StoreDToOffset(DRegister source, XRegister base, int32_t offset);
-
- void LoadImmediate(XRegister dest,
- int32_t value,
- vixl::aarch64::Condition cond = vixl::aarch64::al);
- void Load(Arm64ManagedRegister dst, XRegister src, int32_t src_offset, size_t size);
- void LoadWFromOffset(LoadOperandType type,
- WRegister dest,
- XRegister base,
- int32_t offset);
- void LoadFromOffset(XRegister dest, XRegister base, int32_t offset);
- void LoadSFromOffset(SRegister dest, XRegister base, int32_t offset);
- void LoadDFromOffset(DRegister dest, XRegister base, int32_t offset);
- void AddConstant(XRegister rd,
- int32_t value,
- vixl::aarch64::Condition cond = vixl::aarch64::al);
- void AddConstant(XRegister rd,
- XRegister rn,
- int32_t value,
- vixl::aarch64::Condition cond = vixl::aarch64::al);
-
- // List of exception blocks to generate at the end of the code cache.
- ArenaVector<std::unique_ptr<Arm64Exception>> exception_blocks_;
-
- public:
- // Vixl assembler.
- vixl::aarch64::MacroAssembler* const vixl_masm_;
+ private:
+ // VIXL assembler.
+ vixl::aarch64::MacroAssembler vixl_masm_;
// Used for testing.
friend class Arm64ManagedRegister_VixlRegisters_Test;
diff --git a/compiler/utils/arm64/constants_arm64.h b/compiler/utils/arm64/constants_arm64.h
deleted file mode 100644
index 01e8be9..0000000
--- a/compiler/utils/arm64/constants_arm64.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_UTILS_ARM64_CONSTANTS_ARM64_H_
-#define ART_COMPILER_UTILS_ARM64_CONSTANTS_ARM64_H_
-
-#include <stdint.h>
-#include <iosfwd>
-#include "arch/arm64/registers_arm64.h"
-#include "base/casts.h"
-#include "base/logging.h"
-#include "globals.h"
-
-// TODO: Extend this file by adding missing functionality.
-
-namespace art {
-namespace arm64 {
-
-constexpr size_t kArm64BaseBufferSize = 4096;
-
-} // namespace arm64
-} // namespace art
-
-#endif // ART_COMPILER_UTILS_ARM64_CONSTANTS_ARM64_H_
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.cc b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
new file mode 100644
index 0000000..dfdcd11
--- /dev/null
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
@@ -0,0 +1,754 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni_macro_assembler_arm64.h"
+
+#include "base/logging.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "managed_register_arm64.h"
+#include "offsets.h"
+#include "thread.h"
+
+using namespace vixl::aarch64; // NOLINT(build/namespaces)
+
+namespace art {
+namespace arm64 {
+
+#ifdef ___
+#error "ARM64 Assembler macro already defined."
+#else
+#define ___ asm_.GetVIXLAssembler()->
+#endif
+
+#define reg_x(X) Arm64Assembler::reg_x(X)
+#define reg_w(W) Arm64Assembler::reg_w(W)
+#define reg_d(D) Arm64Assembler::reg_d(D)
+#define reg_s(S) Arm64Assembler::reg_s(S)
+
+Arm64JNIMacroAssembler::~Arm64JNIMacroAssembler() {
+}
+
+void Arm64JNIMacroAssembler::FinalizeCode() {
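+  // Emit the deferred exception slow paths before sealing the buffer.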
+ for (const std::unique_ptr<Arm64Exception>& exception : exception_blocks_) {
+ EmitExceptionPoll(exception.get());
+ }
+ ___ FinalizeCode();
+}
+
+void Arm64JNIMacroAssembler::GetCurrentThread(ManagedRegister tr) {
+ ___ Mov(reg_x(tr.AsArm64().AsXRegister()), reg_x(TR));
+}
+
+void Arm64JNIMacroAssembler::GetCurrentThread(FrameOffset offset, ManagedRegister /* scratch */) {
+ StoreToOffset(TR, SP, offset.Int32Value());
+}
+
+// See Arm64 PCS Section 5.2.2.1.
+void Arm64JNIMacroAssembler::IncreaseFrameSize(size_t adjust) {
+ CHECK_ALIGNED(adjust, kStackAlignment);
+ AddConstant(SP, -adjust);
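+  // SP moves down by |adjust|, so the CFA offset (measured from SP) grows by the same amount.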
+ cfi().AdjustCFAOffset(adjust);
+}
+
+// See Arm64 PCS Section 5.2.2.1.
+void Arm64JNIMacroAssembler::DecreaseFrameSize(size_t adjust) {
+ CHECK_ALIGNED(adjust, kStackAlignment);
+ AddConstant(SP, adjust);
+ cfi().AdjustCFAOffset(-adjust);
+}
+
+void Arm64JNIMacroAssembler::AddConstant(XRegister rd, int32_t value, Condition cond) {
+ AddConstant(rd, rd, value, cond);
+}
+
+void Arm64JNIMacroAssembler::AddConstant(XRegister rd,
+ XRegister rn,
+ int32_t value,
+ Condition cond) {
+ if ((cond == al) || (cond == nv)) {
+ // VIXL macro-assembler handles all variants.
+ ___ Add(reg_x(rd), reg_x(rn), value);
+ } else {
+    // temp = rn + value
+    // rd = cond ? temp : rd
+ UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+ temps.Exclude(reg_x(rd), reg_x(rn));
+ Register temp = temps.AcquireX();
+ ___ Add(temp, reg_x(rn), value);
+ ___ Csel(reg_x(rd), temp, reg_x(rd), cond);
+ }
+}
+
+void Arm64JNIMacroAssembler::StoreWToOffset(StoreOperandType type,
+ WRegister source,
+ XRegister base,
+ int32_t offset) {
+ switch (type) {
+ case kStoreByte:
+ ___ Strb(reg_w(source), MEM_OP(reg_x(base), offset));
+ break;
+ case kStoreHalfword:
+ ___ Strh(reg_w(source), MEM_OP(reg_x(base), offset));
+ break;
+ case kStoreWord:
+ ___ Str(reg_w(source), MEM_OP(reg_x(base), offset));
+ break;
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ }
+}
+
+void Arm64JNIMacroAssembler::StoreToOffset(XRegister source, XRegister base, int32_t offset) {
+ CHECK_NE(source, SP);
+ ___ Str(reg_x(source), MEM_OP(reg_x(base), offset));
+}
+
+void Arm64JNIMacroAssembler::StoreSToOffset(SRegister source, XRegister base, int32_t offset) {
+ ___ Str(reg_s(source), MEM_OP(reg_x(base), offset));
+}
+
+void Arm64JNIMacroAssembler::StoreDToOffset(DRegister source, XRegister base, int32_t offset) {
+ ___ Str(reg_d(source), MEM_OP(reg_x(base), offset));
+}
+
+void Arm64JNIMacroAssembler::Store(FrameOffset offs, ManagedRegister m_src, size_t size) {
+ Arm64ManagedRegister src = m_src.AsArm64();
+ if (src.IsNoRegister()) {
+ CHECK_EQ(0u, size);
+ } else if (src.IsWRegister()) {
+ CHECK_EQ(4u, size);
+ StoreWToOffset(kStoreWord, src.AsWRegister(), SP, offs.Int32Value());
+ } else if (src.IsXRegister()) {
+ CHECK_EQ(8u, size);
+ StoreToOffset(src.AsXRegister(), SP, offs.Int32Value());
+ } else if (src.IsSRegister()) {
+ StoreSToOffset(src.AsSRegister(), SP, offs.Int32Value());
+ } else {
+ CHECK(src.IsDRegister()) << src;
+ StoreDToOffset(src.AsDRegister(), SP, offs.Int32Value());
+ }
+}
+
+void Arm64JNIMacroAssembler::StoreRef(FrameOffset offs, ManagedRegister m_src) {
+ Arm64ManagedRegister src = m_src.AsArm64();
+ CHECK(src.IsXRegister()) << src;
+ StoreWToOffset(kStoreWord, src.AsOverlappingWRegister(), SP,
+ offs.Int32Value());
+}
+
+void Arm64JNIMacroAssembler::StoreRawPtr(FrameOffset offs, ManagedRegister m_src) {
+ Arm64ManagedRegister src = m_src.AsArm64();
+ CHECK(src.IsXRegister()) << src;
+ StoreToOffset(src.AsXRegister(), SP, offs.Int32Value());
+}
+
+void Arm64JNIMacroAssembler::StoreImmediateToFrame(FrameOffset offs,
+ uint32_t imm,
+ ManagedRegister m_scratch) {
+ Arm64ManagedRegister scratch = m_scratch.AsArm64();
+ CHECK(scratch.IsXRegister()) << scratch;
+ LoadImmediate(scratch.AsXRegister(), imm);
+ StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), SP,
+ offs.Int32Value());
+}
+
+void Arm64JNIMacroAssembler::StoreStackOffsetToThread(ThreadOffset64 tr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister m_scratch) {
+ Arm64ManagedRegister scratch = m_scratch.AsArm64();
+ CHECK(scratch.IsXRegister()) << scratch;
+ AddConstant(scratch.AsXRegister(), SP, fr_offs.Int32Value());
+ StoreToOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
+}
+
+void Arm64JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset64 tr_offs) {
+ UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+ Register temp = temps.AcquireX();
+ ___ Mov(temp, reg_x(SP));
+ ___ Str(temp, MEM_OP(reg_x(TR), tr_offs.Int32Value()));
+}
+
+void Arm64JNIMacroAssembler::StoreSpanning(FrameOffset dest_off,
+ ManagedRegister m_source,
+ FrameOffset in_off,
+ ManagedRegister m_scratch) {
+ Arm64ManagedRegister source = m_source.AsArm64();
+ Arm64ManagedRegister scratch = m_scratch.AsArm64();
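+  // Store |source| at |dest_off|, then copy the value at |in_off| to |dest_off| + 8.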
+ StoreToOffset(source.AsXRegister(), SP, dest_off.Int32Value());
+ LoadFromOffset(scratch.AsXRegister(), SP, in_off.Int32Value());
+ StoreToOffset(scratch.AsXRegister(), SP, dest_off.Int32Value() + 8);
+}
+
+// Load routines.
+void Arm64JNIMacroAssembler::LoadImmediate(XRegister dest, int32_t value, Condition cond) {
+ if ((cond == al) || (cond == nv)) {
+ ___ Mov(reg_x(dest), value);
+ } else {
+ // temp = value
+ // rd = cond ? temp : rd
+ if (value != 0) {
+ UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+ temps.Exclude(reg_x(dest));
+ Register temp = temps.AcquireX();
+ ___ Mov(temp, value);
+ ___ Csel(reg_x(dest), temp, reg_x(dest), cond);
+ } else {
+ ___ Csel(reg_x(dest), reg_x(XZR), reg_x(dest), cond);
+ }
+ }
+}
+
+void Arm64JNIMacroAssembler::LoadWFromOffset(LoadOperandType type,
+ WRegister dest,
+ XRegister base,
+ int32_t offset) {
+ switch (type) {
+ case kLoadSignedByte:
+ ___ Ldrsb(reg_w(dest), MEM_OP(reg_x(base), offset));
+ break;
+ case kLoadSignedHalfword:
+ ___ Ldrsh(reg_w(dest), MEM_OP(reg_x(base), offset));
+ break;
+ case kLoadUnsignedByte:
+ ___ Ldrb(reg_w(dest), MEM_OP(reg_x(base), offset));
+ break;
+ case kLoadUnsignedHalfword:
+ ___ Ldrh(reg_w(dest), MEM_OP(reg_x(base), offset));
+ break;
+ case kLoadWord:
+ ___ Ldr(reg_w(dest), MEM_OP(reg_x(base), offset));
+ break;
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ }
+}
+
+// Note: this method could be extended with load-type info; see the
+// sign-extended A64 load variants.
+void Arm64JNIMacroAssembler::LoadFromOffset(XRegister dest, XRegister base, int32_t offset) {
+ CHECK_NE(dest, SP);
+ ___ Ldr(reg_x(dest), MEM_OP(reg_x(base), offset));
+}
+
+void Arm64JNIMacroAssembler::LoadSFromOffset(SRegister dest, XRegister base, int32_t offset) {
+ ___ Ldr(reg_s(dest), MEM_OP(reg_x(base), offset));
+}
+
+void Arm64JNIMacroAssembler::LoadDFromOffset(DRegister dest, XRegister base, int32_t offset) {
+ ___ Ldr(reg_d(dest), MEM_OP(reg_x(base), offset));
+}
+
+void Arm64JNIMacroAssembler::Load(Arm64ManagedRegister dest,
+ XRegister base,
+ int32_t offset,
+ size_t size) {
+ if (dest.IsNoRegister()) {
+ CHECK_EQ(0u, size) << dest;
+ } else if (dest.IsWRegister()) {
+ CHECK_EQ(4u, size) << dest;
+ ___ Ldr(reg_w(dest.AsWRegister()), MEM_OP(reg_x(base), offset));
+ } else if (dest.IsXRegister()) {
+ CHECK_NE(dest.AsXRegister(), SP) << dest;
+ if (size == 4u) {
+ ___ Ldr(reg_w(dest.AsOverlappingWRegister()), MEM_OP(reg_x(base), offset));
+ } else {
+ CHECK_EQ(8u, size) << dest;
+ ___ Ldr(reg_x(dest.AsXRegister()), MEM_OP(reg_x(base), offset));
+ }
+ } else if (dest.IsSRegister()) {
+ ___ Ldr(reg_s(dest.AsSRegister()), MEM_OP(reg_x(base), offset));
+ } else {
+ CHECK(dest.IsDRegister()) << dest;
+ ___ Ldr(reg_d(dest.AsDRegister()), MEM_OP(reg_x(base), offset));
+ }
+}
+
+void Arm64JNIMacroAssembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) {
+ return Load(m_dst.AsArm64(), SP, src.Int32Value(), size);
+}
+
+void Arm64JNIMacroAssembler::LoadFromThread(ManagedRegister m_dst,
+ ThreadOffset64 src,
+ size_t size) {
+ return Load(m_dst.AsArm64(), TR, src.Int32Value(), size);
+}
+
+void Arm64JNIMacroAssembler::LoadRef(ManagedRegister m_dst, FrameOffset offs) {
+ Arm64ManagedRegister dst = m_dst.AsArm64();
+ CHECK(dst.IsXRegister()) << dst;
+ LoadWFromOffset(kLoadWord, dst.AsOverlappingWRegister(), SP, offs.Int32Value());
+}
+
+void Arm64JNIMacroAssembler::LoadRef(ManagedRegister m_dst,
+ ManagedRegister m_base,
+ MemberOffset offs,
+ bool unpoison_reference) {
+ Arm64ManagedRegister dst = m_dst.AsArm64();
+ Arm64ManagedRegister base = m_base.AsArm64();
+ CHECK(dst.IsXRegister() && base.IsXRegister());
+ LoadWFromOffset(kLoadWord, dst.AsOverlappingWRegister(), base.AsXRegister(),
+ offs.Int32Value());
+ if (unpoison_reference) {
+ WRegister ref_reg = dst.AsOverlappingWRegister();
+ asm_.MaybeUnpoisonHeapReference(reg_w(ref_reg));
+ }
+}
+
+void Arm64JNIMacroAssembler::LoadRawPtr(ManagedRegister m_dst,
+ ManagedRegister m_base,
+ Offset offs) {
+ Arm64ManagedRegister dst = m_dst.AsArm64();
+ Arm64ManagedRegister base = m_base.AsArm64();
+ CHECK(dst.IsXRegister() && base.IsXRegister());
+  // Remove dst and base from the temp list - the higher-level API uses IP1 and IP0.
+ UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+ temps.Exclude(reg_x(dst.AsXRegister()), reg_x(base.AsXRegister()));
+ ___ Ldr(reg_x(dst.AsXRegister()), MEM_OP(reg_x(base.AsXRegister()), offs.Int32Value()));
+}
+
+void Arm64JNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister m_dst, ThreadOffset64 offs) {
+ Arm64ManagedRegister dst = m_dst.AsArm64();
+ CHECK(dst.IsXRegister()) << dst;
+ LoadFromOffset(dst.AsXRegister(), TR, offs.Int32Value());
+}
+
+// Copying routines.
+void Arm64JNIMacroAssembler::Move(ManagedRegister m_dst, ManagedRegister m_src, size_t size) {
+ Arm64ManagedRegister dst = m_dst.AsArm64();
+ Arm64ManagedRegister src = m_src.AsArm64();
+ if (!dst.Equals(src)) {
+ if (dst.IsXRegister()) {
+ if (size == 4) {
+ CHECK(src.IsWRegister());
+ ___ Mov(reg_w(dst.AsOverlappingWRegister()), reg_w(src.AsWRegister()));
+ } else {
+ if (src.IsXRegister()) {
+ ___ Mov(reg_x(dst.AsXRegister()), reg_x(src.AsXRegister()));
+ } else {
+ ___ Mov(reg_x(dst.AsXRegister()), reg_x(src.AsOverlappingXRegister()));
+ }
+ }
+ } else if (dst.IsWRegister()) {
+ CHECK(src.IsWRegister()) << src;
+ ___ Mov(reg_w(dst.AsWRegister()), reg_w(src.AsWRegister()));
+ } else if (dst.IsSRegister()) {
+ CHECK(src.IsSRegister()) << src;
+ ___ Fmov(reg_s(dst.AsSRegister()), reg_s(src.AsSRegister()));
+ } else {
+ CHECK(dst.IsDRegister()) << dst;
+ CHECK(src.IsDRegister()) << src;
+ ___ Fmov(reg_d(dst.AsDRegister()), reg_d(src.AsDRegister()));
+ }
+ }
+}
+
+void Arm64JNIMacroAssembler::CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset64 tr_offs,
+ ManagedRegister m_scratch) {
+ Arm64ManagedRegister scratch = m_scratch.AsArm64();
+ CHECK(scratch.IsXRegister()) << scratch;
+ LoadFromOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
+ StoreToOffset(scratch.AsXRegister(), SP, fr_offs.Int32Value());
+}
+
+void Arm64JNIMacroAssembler::CopyRawPtrToThread(ThreadOffset64 tr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister m_scratch) {
+ Arm64ManagedRegister scratch = m_scratch.AsArm64();
+ CHECK(scratch.IsXRegister()) << scratch;
+ LoadFromOffset(scratch.AsXRegister(), SP, fr_offs.Int32Value());
+ StoreToOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
+}
+
+void Arm64JNIMacroAssembler::CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister m_scratch) {
+ Arm64ManagedRegister scratch = m_scratch.AsArm64();
+ CHECK(scratch.IsXRegister()) << scratch;
+ LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(),
+ SP, src.Int32Value());
+ StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(),
+ SP, dest.Int32Value());
+}
+
+void Arm64JNIMacroAssembler::Copy(FrameOffset dest,
+ FrameOffset src,
+ ManagedRegister m_scratch,
+ size_t size) {
+ Arm64ManagedRegister scratch = m_scratch.AsArm64();
+ CHECK(scratch.IsXRegister()) << scratch;
+ CHECK(size == 4 || size == 8) << size;
+ if (size == 4) {
+ LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), SP, src.Int32Value());
+ StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), SP, dest.Int32Value());
+ } else if (size == 8) {
+ LoadFromOffset(scratch.AsXRegister(), SP, src.Int32Value());
+ StoreToOffset(scratch.AsXRegister(), SP, dest.Int32Value());
+ } else {
+ UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
+ }
+}
+
+void Arm64JNIMacroAssembler::Copy(FrameOffset dest,
+ ManagedRegister src_base,
+ Offset src_offset,
+ ManagedRegister m_scratch,
+ size_t size) {
+ Arm64ManagedRegister scratch = m_scratch.AsArm64();
+ Arm64ManagedRegister base = src_base.AsArm64();
+ CHECK(base.IsXRegister()) << base;
+ CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch;
+ CHECK(size == 4 || size == 8) << size;
+ if (size == 4) {
+ LoadWFromOffset(kLoadWord, scratch.AsWRegister(), base.AsXRegister(),
+ src_offset.Int32Value());
+ StoreWToOffset(kStoreWord, scratch.AsWRegister(), SP, dest.Int32Value());
+ } else if (size == 8) {
+ LoadFromOffset(scratch.AsXRegister(), base.AsXRegister(), src_offset.Int32Value());
+ StoreToOffset(scratch.AsXRegister(), SP, dest.Int32Value());
+ } else {
+ UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
+ }
+}
+
+void Arm64JNIMacroAssembler::Copy(ManagedRegister m_dest_base,
+ Offset dest_offs,
+ FrameOffset src,
+ ManagedRegister m_scratch,
+ size_t size) {
+ Arm64ManagedRegister scratch = m_scratch.AsArm64();
+ Arm64ManagedRegister base = m_dest_base.AsArm64();
+ CHECK(base.IsXRegister()) << base;
+ CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch;
+ CHECK(size == 4 || size == 8) << size;
+ if (size == 4) {
+ LoadWFromOffset(kLoadWord, scratch.AsWRegister(), SP, src.Int32Value());
+ StoreWToOffset(kStoreWord, scratch.AsWRegister(), base.AsXRegister(),
+ dest_offs.Int32Value());
+ } else if (size == 8) {
+ LoadFromOffset(scratch.AsXRegister(), SP, src.Int32Value());
+ StoreToOffset(scratch.AsXRegister(), base.AsXRegister(), dest_offs.Int32Value());
+ } else {
+ UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
+ }
+}
+
+void Arm64JNIMacroAssembler::Copy(FrameOffset /*dst*/,
+ FrameOffset /*src_base*/,
+ Offset /*src_offset*/,
+ ManagedRegister /*mscratch*/,
+ size_t /*size*/) {
+ UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant";
+}
+
+void Arm64JNIMacroAssembler::Copy(ManagedRegister m_dest,
+ Offset dest_offset,
+ ManagedRegister m_src,
+ Offset src_offset,
+ ManagedRegister m_scratch,
+ size_t size) {
+ Arm64ManagedRegister scratch = m_scratch.AsArm64();
+ Arm64ManagedRegister src = m_src.AsArm64();
+ Arm64ManagedRegister dest = m_dest.AsArm64();
+ CHECK(dest.IsXRegister()) << dest;
+ CHECK(src.IsXRegister()) << src;
+ CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch;
+ CHECK(size == 4 || size == 8) << size;
+ if (size == 4) {
+ if (scratch.IsWRegister()) {
+ LoadWFromOffset(kLoadWord, scratch.AsWRegister(), src.AsXRegister(),
+ src_offset.Int32Value());
+ StoreWToOffset(kStoreWord, scratch.AsWRegister(), dest.AsXRegister(),
+ dest_offset.Int32Value());
+ } else {
+ LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), src.AsXRegister(),
+ src_offset.Int32Value());
+ StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), dest.AsXRegister(),
+ dest_offset.Int32Value());
+ }
+ } else if (size == 8) {
+ LoadFromOffset(scratch.AsXRegister(), src.AsXRegister(), src_offset.Int32Value());
+ StoreToOffset(scratch.AsXRegister(), dest.AsXRegister(), dest_offset.Int32Value());
+ } else {
+ UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
+ }
+}
+
+void Arm64JNIMacroAssembler::Copy(FrameOffset /*dst*/,
+ Offset /*dest_offset*/,
+ FrameOffset /*src*/,
+ Offset /*src_offset*/,
+ ManagedRegister /*scratch*/,
+ size_t /*size*/) {
+ UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant";
+}
+
+void Arm64JNIMacroAssembler::MemoryBarrier(ManagedRegister m_scratch ATTRIBUTE_UNUSED) {
+ // TODO: Should we check that m_scratch is IP? - see arm.
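+  // Full data memory barrier for the inner shareable domain.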
+ ___ Dmb(InnerShareable, BarrierAll);
+}
+
+void Arm64JNIMacroAssembler::SignExtend(ManagedRegister mreg, size_t size) {
+ Arm64ManagedRegister reg = mreg.AsArm64();
+ CHECK(size == 1 || size == 2) << size;
+ CHECK(reg.IsWRegister()) << reg;
+ if (size == 1) {
+ ___ Sxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
+ } else {
+ ___ Sxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
+ }
+}
+
+void Arm64JNIMacroAssembler::ZeroExtend(ManagedRegister mreg, size_t size) {
+ Arm64ManagedRegister reg = mreg.AsArm64();
+ CHECK(size == 1 || size == 2) << size;
+ CHECK(reg.IsWRegister()) << reg;
+ if (size == 1) {
+ ___ Uxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
+ } else {
+ ___ Uxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
+ }
+}
+
+void Arm64JNIMacroAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
+ // TODO: not validating references.
+}
+
+void Arm64JNIMacroAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
+ // TODO: not validating references.
+}
+
+void Arm64JNIMacroAssembler::Call(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch) {
+ Arm64ManagedRegister base = m_base.AsArm64();
+ Arm64ManagedRegister scratch = m_scratch.AsArm64();
+ CHECK(base.IsXRegister()) << base;
+ CHECK(scratch.IsXRegister()) << scratch;
+ LoadFromOffset(scratch.AsXRegister(), base.AsXRegister(), offs.Int32Value());
+ ___ Blr(reg_x(scratch.AsXRegister()));
+}
+
+void Arm64JNIMacroAssembler::Call(FrameOffset base, Offset offs, ManagedRegister m_scratch) {
+ Arm64ManagedRegister scratch = m_scratch.AsArm64();
+ CHECK(scratch.IsXRegister()) << scratch;
+ // Call *(*(SP + base) + offset)
+ LoadFromOffset(scratch.AsXRegister(), SP, base.Int32Value());
+ LoadFromOffset(scratch.AsXRegister(), scratch.AsXRegister(), offs.Int32Value());
+ ___ Blr(reg_x(scratch.AsXRegister()));
+}
+
+void Arm64JNIMacroAssembler::CallFromThread(ThreadOffset64 offset ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL) << "Unimplemented Call() variant";
+}
+
+void Arm64JNIMacroAssembler::CreateHandleScopeEntry(ManagedRegister m_out_reg,
+ FrameOffset handle_scope_offs,
+ ManagedRegister m_in_reg,
+ bool null_allowed) {
+ Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
+ Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
+ // For now we only hold stale handle scope entries in x registers.
+ CHECK(in_reg.IsNoRegister() || in_reg.IsXRegister()) << in_reg;
+ CHECK(out_reg.IsXRegister()) << out_reg;
+ if (null_allowed) {
+ // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
+ // the address in the handle scope holding the reference.
+ // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
+ if (in_reg.IsNoRegister()) {
+ LoadWFromOffset(kLoadWord, out_reg.AsOverlappingWRegister(), SP,
+ handle_scope_offs.Int32Value());
+ in_reg = out_reg;
+ }
+ ___ Cmp(reg_w(in_reg.AsOverlappingWRegister()), 0);
+ if (!out_reg.Equals(in_reg)) {
+ LoadImmediate(out_reg.AsXRegister(), 0, eq);
+ }
+ AddConstant(out_reg.AsXRegister(), SP, handle_scope_offs.Int32Value(), ne);
+ } else {
+ AddConstant(out_reg.AsXRegister(), SP, handle_scope_offs.Int32Value(), al);
+ }
+}
+
+void Arm64JNIMacroAssembler::CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handle_scope_offset,
+ ManagedRegister m_scratch,
+ bool null_allowed) {
+ Arm64ManagedRegister scratch = m_scratch.AsArm64();
+ CHECK(scratch.IsXRegister()) << scratch;
+ if (null_allowed) {
+ LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), SP,
+ handle_scope_offset.Int32Value());
+ // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
+ // the address in the handle scope holding the reference.
+ // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
+ ___ Cmp(reg_w(scratch.AsOverlappingWRegister()), 0);
+    // TODO: Move this logic into the AddConstant() variants that use flags.
+ AddConstant(scratch.AsXRegister(), SP, handle_scope_offset.Int32Value(), ne);
+ } else {
+ AddConstant(scratch.AsXRegister(), SP, handle_scope_offset.Int32Value(), al);
+ }
+ StoreToOffset(scratch.AsXRegister(), SP, out_off.Int32Value());
+}
+
+void Arm64JNIMacroAssembler::LoadReferenceFromHandleScope(ManagedRegister m_out_reg,
+ ManagedRegister m_in_reg) {
+ Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
+ Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
+ CHECK(out_reg.IsXRegister()) << out_reg;
+ CHECK(in_reg.IsXRegister()) << in_reg;
+ vixl::aarch64::Label exit;
+ if (!out_reg.Equals(in_reg)) {
+ // FIXME: Who sets the flags here?
+ LoadImmediate(out_reg.AsXRegister(), 0, eq);
+ }
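+  // Skip the dereference when the handle is null.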
+ ___ Cbz(reg_x(in_reg.AsXRegister()), &exit);
+ LoadFromOffset(out_reg.AsXRegister(), in_reg.AsXRegister(), 0);
+ ___ Bind(&exit);
+}
+
+void Arm64JNIMacroAssembler::ExceptionPoll(ManagedRegister m_scratch, size_t stack_adjust) {
+ CHECK_ALIGNED(stack_adjust, kStackAlignment);
+ Arm64ManagedRegister scratch = m_scratch.AsArm64();
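+  // Record a slow-path block; it is emitted at the end of the method in FinalizeCode().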
+ exception_blocks_.emplace_back(new Arm64Exception(scratch, stack_adjust));
+ LoadFromOffset(scratch.AsXRegister(),
+ TR,
+ Thread::ExceptionOffset<kArm64PointerSize>().Int32Value());
+ ___ Cbnz(reg_x(scratch.AsXRegister()), exception_blocks_.back()->Entry());
+}
+
+void Arm64JNIMacroAssembler::EmitExceptionPoll(Arm64Exception* exception) {
+ UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
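+  // The scratch register still holds the exception object; keep it out of the temp pool.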
+ temps.Exclude(reg_x(exception->scratch_.AsXRegister()));
+ Register temp = temps.AcquireX();
+
+ // Bind exception poll entry.
+ ___ Bind(exception->Entry());
+ if (exception->stack_adjust_ != 0) { // Fix up the frame.
+ DecreaseFrameSize(exception->stack_adjust_);
+ }
+ // Pass exception object as argument.
+ // Don't care about preserving X0 as this won't return.
+ ___ Mov(reg_x(X0), reg_x(exception->scratch_.AsXRegister()));
+ ___ Ldr(temp,
+ MEM_OP(reg_x(TR),
+ QUICK_ENTRYPOINT_OFFSET(kArm64PointerSize, pDeliverException).Int32Value()));
+
+ ___ Blr(temp);
+ // Call should never return.
+ ___ Brk();
+}
+
+void Arm64JNIMacroAssembler::BuildFrame(size_t frame_size,
+ ManagedRegister method_reg,
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ const ManagedRegisterEntrySpills& entry_spills) {
+ // Setup VIXL CPURegList for callee-saves.
+ CPURegList core_reg_list(CPURegister::kRegister, kXRegSize, 0);
+ CPURegList fp_reg_list(CPURegister::kFPRegister, kDRegSize, 0);
+ for (auto r : callee_save_regs) {
+ Arm64ManagedRegister reg = r.AsArm64();
+ if (reg.IsXRegister()) {
+ core_reg_list.Combine(reg_x(reg.AsXRegister()).GetCode());
+ } else {
+ DCHECK(reg.IsDRegister());
+ fp_reg_list.Combine(reg_d(reg.AsDRegister()).GetCode());
+ }
+ }
+ size_t core_reg_size = core_reg_list.GetTotalSizeInBytes();
+ size_t fp_reg_size = fp_reg_list.GetTotalSizeInBytes();
+
+ // Increase frame to required size.
+ DCHECK_ALIGNED(frame_size, kStackAlignment);
+ DCHECK_GE(frame_size, core_reg_size + fp_reg_size + static_cast<size_t>(kArm64PointerSize));
+ IncreaseFrameSize(frame_size);
+
+ // Save callee-saves.
+ asm_.SpillRegisters(core_reg_list, frame_size - core_reg_size);
+ asm_.SpillRegisters(fp_reg_list, frame_size - core_reg_size - fp_reg_size);
+
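+  // The thread register (TR) is expected to be among the callee saves.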
+ DCHECK(core_reg_list.IncludesAliasOf(reg_x(TR)));
+
+  // Write ArtMethod* to the bottom of the stack frame.
+ DCHECK(X0 == method_reg.AsArm64().AsXRegister());
+ StoreToOffset(X0, SP, 0);
+
+  // Write out the entry spills above the frame.
+ int32_t offset = frame_size + static_cast<size_t>(kArm64PointerSize);
+ for (size_t i = 0; i < entry_spills.size(); ++i) {
+ Arm64ManagedRegister reg = entry_spills.at(i).AsArm64();
+ if (reg.IsNoRegister()) {
+      // Only increment the stack offset.
+ ManagedRegisterSpill spill = entry_spills.at(i);
+ offset += spill.getSize();
+ } else if (reg.IsXRegister()) {
+ StoreToOffset(reg.AsXRegister(), SP, offset);
+ offset += 8;
+ } else if (reg.IsWRegister()) {
+ StoreWToOffset(kStoreWord, reg.AsWRegister(), SP, offset);
+ offset += 4;
+ } else if (reg.IsDRegister()) {
+ StoreDToOffset(reg.AsDRegister(), SP, offset);
+ offset += 8;
+ } else if (reg.IsSRegister()) {
+ StoreSToOffset(reg.AsSRegister(), SP, offset);
+ offset += 4;
+ }
+ }
+}
+
+void Arm64JNIMacroAssembler::RemoveFrame(size_t frame_size,
+ ArrayRef<const ManagedRegister> callee_save_regs) {
+ // Setup VIXL CPURegList for callee-saves.
+ CPURegList core_reg_list(CPURegister::kRegister, kXRegSize, 0);
+ CPURegList fp_reg_list(CPURegister::kFPRegister, kDRegSize, 0);
+ for (auto r : callee_save_regs) {
+ Arm64ManagedRegister reg = r.AsArm64();
+ if (reg.IsXRegister()) {
+ core_reg_list.Combine(reg_x(reg.AsXRegister()).GetCode());
+ } else {
+ DCHECK(reg.IsDRegister());
+ fp_reg_list.Combine(reg_d(reg.AsDRegister()).GetCode());
+ }
+ }
+ size_t core_reg_size = core_reg_list.GetTotalSizeInBytes();
+ size_t fp_reg_size = fp_reg_list.GetTotalSizeInBytes();
+
+ // For now we only check that the size of the frame is large enough to hold spills and method
+ // reference.
+ DCHECK_GE(frame_size, core_reg_size + fp_reg_size + static_cast<size_t>(kArm64PointerSize));
+ DCHECK_ALIGNED(frame_size, kStackAlignment);
+
+ DCHECK(core_reg_list.IncludesAliasOf(reg_x(TR)));
+
+ cfi().RememberState();
+
+ // Restore callee-saves.
+ asm_.UnspillRegisters(core_reg_list, frame_size - core_reg_size);
+ asm_.UnspillRegisters(fp_reg_list, frame_size - core_reg_size - fp_reg_size);
+
+ // Decrease frame size to start of callee saved regs.
+ DecreaseFrameSize(frame_size);
+
+  // Return to the address in LR.
+ ___ Ret();
+
+ // The CFI should be restored for any code that follows the exit block.
+ cfi().RestoreState();
+ cfi().DefCFAOffset(frame_size);
+}
+
+#undef ___
+
+} // namespace arm64
+} // namespace art
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.h b/compiler/utils/arm64/jni_macro_assembler_arm64.h
new file mode 100644
index 0000000..79ee441
--- /dev/null
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.h
@@ -0,0 +1,230 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_ARM64_JNI_MACRO_ASSEMBLER_ARM64_H_
+#define ART_COMPILER_UTILS_ARM64_JNI_MACRO_ASSEMBLER_ARM64_H_
+
+#include <stdint.h>
+#include <memory>
+#include <vector>
+
+#include "assembler_arm64.h"
+#include "base/arena_containers.h"
+#include "base/enums.h"
+#include "base/logging.h"
+#include "utils/assembler.h"
+#include "utils/jni_macro_assembler.h"
+#include "offsets.h"
+
+// TODO: make vixl clean wrt -Wshadow, -Wunknown-pragmas, -Wmissing-noreturn
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunknown-pragmas"
+#pragma GCC diagnostic ignored "-Wshadow"
+#pragma GCC diagnostic ignored "-Wmissing-noreturn"
+#include "a64/macro-assembler-a64.h"
+#pragma GCC diagnostic pop
+
+namespace art {
+namespace arm64 {
+
+class Arm64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<Arm64Assembler, PointerSize::k64> {
+ public:
+ explicit Arm64JNIMacroAssembler(ArenaAllocator* arena)
+ : JNIMacroAssemblerFwd(arena),
+ exception_blocks_(arena->Adapter(kArenaAllocAssembler)) {}
+
+ ~Arm64JNIMacroAssembler();
+
+ // Finalize the code.
+ void FinalizeCode() OVERRIDE;
+
+ // Emit code that will create an activation on the stack.
+ void BuildFrame(size_t frame_size,
+ ManagedRegister method_reg,
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
+
+ // Emit code that will remove an activation from the stack.
+ void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs)
+ OVERRIDE;
+
+ void IncreaseFrameSize(size_t adjust) OVERRIDE;
+ void DecreaseFrameSize(size_t adjust) OVERRIDE;
+
+ // Store routines.
+ void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
+ void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
+ void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
+ void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
+ void StoreStackOffsetToThread(ThreadOffset64 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister scratch) OVERRIDE;
+ void StoreStackPointerToThread(ThreadOffset64 thr_offs) OVERRIDE;
+ void StoreSpanning(FrameOffset dest,
+ ManagedRegister src,
+ FrameOffset in_off,
+ ManagedRegister scratch) OVERRIDE;
+
+ // Load routines.
+ void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
+ void LoadFromThread(ManagedRegister dest, ThreadOffset64 src, size_t size) OVERRIDE;
+ void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+ void LoadRef(ManagedRegister dest,
+ ManagedRegister base,
+ MemberOffset offs,
+ bool unpoison_reference) OVERRIDE;
+ void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
+ void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset64 offs) OVERRIDE;
+
+ // Copying routines.
+ void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
+ void CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset64 thr_offs,
+ ManagedRegister scratch) OVERRIDE;
+ void CopyRawPtrToThread(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
+ OVERRIDE;
+ void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
+ void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
+ void Copy(FrameOffset dest,
+ ManagedRegister src_base,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) OVERRIDE;
+ void Copy(ManagedRegister dest_base,
+ Offset dest_offset,
+ FrameOffset src,
+ ManagedRegister scratch,
+ size_t size) OVERRIDE;
+ void Copy(FrameOffset dest,
+ FrameOffset src_base,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) OVERRIDE;
+ void Copy(ManagedRegister dest,
+ Offset dest_offset,
+ ManagedRegister src,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) OVERRIDE;
+ void Copy(FrameOffset dest,
+ Offset dest_offset,
+ FrameOffset src,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) OVERRIDE;
+ void MemoryBarrier(ManagedRegister scratch) OVERRIDE;
+
+ // Sign extension.
+ void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+
+ // Zero extension.
+ void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+
+ // Exploit fast access in managed code to Thread::Current().
+ void GetCurrentThread(ManagedRegister tr) OVERRIDE;
+ void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
+
+  // Set up out_reg to hold an Object** into the handle scope, or to be null if the
+ // value is null and null_allowed. in_reg holds a possibly stale reference
+ // that can be used to avoid loading the handle scope entry to see if the value is
+ // null.
+ void CreateHandleScopeEntry(ManagedRegister out_reg,
+ FrameOffset handlescope_offset,
+ ManagedRegister in_reg,
+ bool null_allowed) OVERRIDE;
+
+  // Set up out_off to hold an Object** into the handle scope, or to be null if the
+ // value is null and null_allowed.
+ void CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handlescope_offset,
+ ManagedRegister scratch,
+ bool null_allowed) OVERRIDE;
+
+  // src holds a handle scope entry (Object**); load this into dst.
+ void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+
+ // Heap::VerifyObject on src. In some cases (such as a reference to this) we
+  // know that src cannot be null.
+ void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
+ void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
+
+ // Call to address held at [base+offset].
+ void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
+ void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
+ void CallFromThread(ThreadOffset64 offset, ManagedRegister scratch) OVERRIDE;
+
+ // Generate code to check if Thread::Current()->exception_ is non-null
+  // and branch to an ExceptionSlowPath if it is.
+ void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
+
+ private:
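+  // Bookkeeping for one ExceptionPoll site; the slow path itself is emitted in FinalizeCode().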
+ class Arm64Exception {
+ public:
+ Arm64Exception(Arm64ManagedRegister scratch, size_t stack_adjust)
+ : scratch_(scratch), stack_adjust_(stack_adjust) {}
+
+ vixl::aarch64::Label* Entry() { return &exception_entry_; }
+
+  // Register used for passing Thread::Current()->exception_.
+ const Arm64ManagedRegister scratch_;
+
+  // Stack adjustment for ExceptionPoll.
+ const size_t stack_adjust_;
+
+ vixl::aarch64::Label exception_entry_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(Arm64Exception);
+ };
+
+  // Emits the exception slow-path block.
+  void EmitExceptionPoll(Arm64Exception* exception);
+
+ void StoreWToOffset(StoreOperandType type,
+ WRegister source,
+ XRegister base,
+ int32_t offset);
+ void StoreToOffset(XRegister source, XRegister base, int32_t offset);
+ void StoreSToOffset(SRegister source, XRegister base, int32_t offset);
+ void StoreDToOffset(DRegister source, XRegister base, int32_t offset);
+
+ void LoadImmediate(XRegister dest,
+ int32_t value,
+ vixl::aarch64::Condition cond = vixl::aarch64::al);
+ void Load(Arm64ManagedRegister dst, XRegister src, int32_t src_offset, size_t size);
+ void LoadWFromOffset(LoadOperandType type,
+ WRegister dest,
+ XRegister base,
+ int32_t offset);
+ void LoadFromOffset(XRegister dest, XRegister base, int32_t offset);
+ void LoadSFromOffset(SRegister dest, XRegister base, int32_t offset);
+ void LoadDFromOffset(DRegister dest, XRegister base, int32_t offset);
+ void AddConstant(XRegister rd,
+ int32_t value,
+ vixl::aarch64::Condition cond = vixl::aarch64::al);
+ void AddConstant(XRegister rd,
+ XRegister rn,
+ int32_t value,
+ vixl::aarch64::Condition cond = vixl::aarch64::al);
+
+ // List of exception blocks to generate at the end of the code cache.
+ ArenaVector<std::unique_ptr<Arm64Exception>> exception_blocks_;
+};
+
+} // namespace arm64
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_ARM64_JNI_MACRO_ASSEMBLER_ARM64_H_
diff --git a/compiler/utils/arm64/managed_register_arm64.h b/compiler/utils/arm64/managed_register_arm64.h
index f7d74d2..7378a0a 100644
--- a/compiler/utils/arm64/managed_register_arm64.h
+++ b/compiler/utils/arm64/managed_register_arm64.h
@@ -17,8 +17,8 @@
#ifndef ART_COMPILER_UTILS_ARM64_MANAGED_REGISTER_ARM64_H_
#define ART_COMPILER_UTILS_ARM64_MANAGED_REGISTER_ARM64_H_
+#include "arch/arm64/registers_arm64.h"
#include "base/logging.h"
-#include "constants_arm64.h"
#include "debug/dwarf/register.h"
#include "utils/managed_register.h"
diff --git a/compiler/utils/assembler.cc b/compiler/utils/assembler.cc
index 0a1b733..81159e6 100644
--- a/compiler/utils/assembler.cc
+++ b/compiler/utils/assembler.cc
@@ -121,137 +121,4 @@
}
}
-std::unique_ptr<Assembler> Assembler::Create(
- ArenaAllocator* arena,
- InstructionSet instruction_set,
- const InstructionSetFeatures* instruction_set_features) {
- switch (instruction_set) {
-#ifdef ART_ENABLE_CODEGEN_arm
- case kArm:
- return std::unique_ptr<Assembler>(new (arena) arm::Arm32Assembler(arena));
- case kThumb2:
- return std::unique_ptr<Assembler>(new (arena) arm::Thumb2Assembler(arena));
-#endif
-#ifdef ART_ENABLE_CODEGEN_arm64
- case kArm64:
- return std::unique_ptr<Assembler>(new (arena) arm64::Arm64Assembler(arena));
-#endif
-#ifdef ART_ENABLE_CODEGEN_mips
- case kMips:
- return std::unique_ptr<Assembler>(new (arena) mips::MipsAssembler(
- arena,
- instruction_set_features != nullptr
- ? instruction_set_features->AsMipsInstructionSetFeatures()
- : nullptr));
-#endif
-#ifdef ART_ENABLE_CODEGEN_mips64
- case kMips64:
- return std::unique_ptr<Assembler>(new (arena) mips64::Mips64Assembler(arena));
-#endif
-#ifdef ART_ENABLE_CODEGEN_x86
- case kX86:
- return std::unique_ptr<Assembler>(new (arena) x86::X86Assembler(arena));
-#endif
-#ifdef ART_ENABLE_CODEGEN_x86_64
- case kX86_64:
- return std::unique_ptr<Assembler>(new (arena) x86_64::X86_64Assembler(arena));
-#endif
- default:
- LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
- return nullptr;
- }
-}
-
-void Assembler::StoreImmediateToThread32(ThreadOffset32 dest ATTRIBUTE_UNUSED,
- uint32_t imm ATTRIBUTE_UNUSED,
- ManagedRegister scratch ATTRIBUTE_UNUSED) {
- UNIMPLEMENTED(FATAL);
-}
-
-void Assembler::StoreImmediateToThread64(ThreadOffset64 dest ATTRIBUTE_UNUSED,
- uint32_t imm ATTRIBUTE_UNUSED,
- ManagedRegister scratch ATTRIBUTE_UNUSED) {
- UNIMPLEMENTED(FATAL);
-}
-
-void Assembler::StoreStackOffsetToThread32(
- ThreadOffset32 thr_offs ATTRIBUTE_UNUSED,
- FrameOffset fr_offs ATTRIBUTE_UNUSED,
- ManagedRegister scratch ATTRIBUTE_UNUSED) {
- UNIMPLEMENTED(FATAL);
-}
-
-void Assembler::StoreStackOffsetToThread64(
- ThreadOffset64 thr_offs ATTRIBUTE_UNUSED,
- FrameOffset fr_offs ATTRIBUTE_UNUSED,
- ManagedRegister scratch ATTRIBUTE_UNUSED) {
- UNIMPLEMENTED(FATAL);
-}
-
-void Assembler::StoreStackPointerToThread32(
- ThreadOffset32 thr_offs ATTRIBUTE_UNUSED) {
- UNIMPLEMENTED(FATAL);
-}
-
-void Assembler::StoreStackPointerToThread64(
- ThreadOffset64 thr_offs ATTRIBUTE_UNUSED) {
- UNIMPLEMENTED(FATAL);
-}
-
-void Assembler::LoadFromThread32(ManagedRegister dest ATTRIBUTE_UNUSED,
- ThreadOffset32 src ATTRIBUTE_UNUSED,
- size_t size ATTRIBUTE_UNUSED) {
- UNIMPLEMENTED(FATAL);
-}
-
-void Assembler::LoadFromThread64(ManagedRegister dest ATTRIBUTE_UNUSED,
- ThreadOffset64 src ATTRIBUTE_UNUSED,
- size_t size ATTRIBUTE_UNUSED) {
- UNIMPLEMENTED(FATAL);
-}
-
-void Assembler::LoadRawPtrFromThread32(ManagedRegister dest ATTRIBUTE_UNUSED,
- ThreadOffset32 offs ATTRIBUTE_UNUSED) {
- UNIMPLEMENTED(FATAL);
-}
-
-void Assembler::LoadRawPtrFromThread64(ManagedRegister dest ATTRIBUTE_UNUSED,
- ThreadOffset64 offs ATTRIBUTE_UNUSED) {
- UNIMPLEMENTED(FATAL);
-}
-
-void Assembler::CopyRawPtrFromThread32(FrameOffset fr_offs ATTRIBUTE_UNUSED,
- ThreadOffset32 thr_offs ATTRIBUTE_UNUSED,
- ManagedRegister scratch ATTRIBUTE_UNUSED) {
- UNIMPLEMENTED(FATAL);
-}
-
-void Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs ATTRIBUTE_UNUSED,
- ThreadOffset64 thr_offs ATTRIBUTE_UNUSED,
- ManagedRegister scratch ATTRIBUTE_UNUSED) {
- UNIMPLEMENTED(FATAL);
-}
-
-void Assembler::CopyRawPtrToThread32(ThreadOffset32 thr_offs ATTRIBUTE_UNUSED,
- FrameOffset fr_offs ATTRIBUTE_UNUSED,
- ManagedRegister scratch ATTRIBUTE_UNUSED) {
- UNIMPLEMENTED(FATAL);
-}
-
-void Assembler::CopyRawPtrToThread64(ThreadOffset64 thr_offs ATTRIBUTE_UNUSED,
- FrameOffset fr_offs ATTRIBUTE_UNUSED,
- ManagedRegister scratch ATTRIBUTE_UNUSED) {
- UNIMPLEMENTED(FATAL);
-}
-
-void Assembler::CallFromThread32(ThreadOffset32 offset ATTRIBUTE_UNUSED,
- ManagedRegister scratch ATTRIBUTE_UNUSED) {
- UNIMPLEMENTED(FATAL);
-}
-
-void Assembler::CallFromThread64(ThreadOffset64 offset ATTRIBUTE_UNUSED,
- ManagedRegister scratch ATTRIBUTE_UNUSED) {
- UNIMPLEMENTED(FATAL);
-}
-
} // namespace art
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index 89f7947..8981776 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -356,11 +356,6 @@
class Assembler : public DeletableArenaObject<kArenaAllocAssembler> {
public:
- static std::unique_ptr<Assembler> Create(
- ArenaAllocator* arena,
- InstructionSet instruction_set,
- const InstructionSetFeatures* instruction_set_features = nullptr);
-
// Finalize the code; emit slow paths, fixup branches, add literal pool, etc.
virtual void FinalizeCode() { buffer_.EmitSlowPaths(this); }
@@ -376,144 +371,6 @@
// TODO: Implement with disassembler.
virtual void Comment(const char* format ATTRIBUTE_UNUSED, ...) {}
- // Emit code that will create an activation on the stack
- virtual void BuildFrame(size_t frame_size,
- ManagedRegister method_reg,
- ArrayRef<const ManagedRegister> callee_save_regs,
- const ManagedRegisterEntrySpills& entry_spills) = 0;
-
- // Emit code that will remove an activation from the stack
- virtual void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs) = 0;
-
- virtual void IncreaseFrameSize(size_t adjust) = 0;
- virtual void DecreaseFrameSize(size_t adjust) = 0;
-
- // Store routines
- virtual void Store(FrameOffset offs, ManagedRegister src, size_t size) = 0;
- virtual void StoreRef(FrameOffset dest, ManagedRegister src) = 0;
- virtual void StoreRawPtr(FrameOffset dest, ManagedRegister src) = 0;
-
- virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) = 0;
-
- virtual void StoreImmediateToThread32(ThreadOffset32 dest,
- uint32_t imm,
- ManagedRegister scratch);
- virtual void StoreImmediateToThread64(ThreadOffset64 dest,
- uint32_t imm,
- ManagedRegister scratch);
-
- virtual void StoreStackOffsetToThread32(ThreadOffset32 thr_offs,
- FrameOffset fr_offs,
- ManagedRegister scratch);
- virtual void StoreStackOffsetToThread64(ThreadOffset64 thr_offs,
- FrameOffset fr_offs,
- ManagedRegister scratch);
-
- virtual void StoreStackPointerToThread32(ThreadOffset32 thr_offs);
- virtual void StoreStackPointerToThread64(ThreadOffset64 thr_offs);
-
- virtual void StoreSpanning(FrameOffset dest, ManagedRegister src,
- FrameOffset in_off, ManagedRegister scratch) = 0;
-
- // Load routines
- virtual void Load(ManagedRegister dest, FrameOffset src, size_t size) = 0;
-
- virtual void LoadFromThread32(ManagedRegister dest, ThreadOffset32 src, size_t size);
- virtual void LoadFromThread64(ManagedRegister dest, ThreadOffset64 src, size_t size);
-
- virtual void LoadRef(ManagedRegister dest, FrameOffset src) = 0;
- // If unpoison_reference is true and kPoisonReference is true, then we negate the read reference.
- virtual void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
- bool unpoison_reference) = 0;
-
- virtual void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) = 0;
-
- virtual void LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset32 offs);
- virtual void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset64 offs);
-
- // Copying routines
- virtual void Move(ManagedRegister dest, ManagedRegister src, size_t size) = 0;
-
- virtual void CopyRawPtrFromThread32(FrameOffset fr_offs,
- ThreadOffset32 thr_offs,
- ManagedRegister scratch);
- virtual void CopyRawPtrFromThread64(FrameOffset fr_offs,
- ThreadOffset64 thr_offs,
- ManagedRegister scratch);
-
- virtual void CopyRawPtrToThread32(ThreadOffset32 thr_offs,
- FrameOffset fr_offs,
- ManagedRegister scratch);
- virtual void CopyRawPtrToThread64(ThreadOffset64 thr_offs,
- FrameOffset fr_offs,
- ManagedRegister scratch);
-
- virtual void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) = 0;
-
- virtual void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) = 0;
-
- virtual void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
- ManagedRegister scratch, size_t size) = 0;
-
- virtual void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
- ManagedRegister scratch, size_t size) = 0;
-
- virtual void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset,
- ManagedRegister scratch, size_t size) = 0;
-
- virtual void Copy(ManagedRegister dest, Offset dest_offset,
- ManagedRegister src, Offset src_offset,
- ManagedRegister scratch, size_t size) = 0;
-
- virtual void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
- ManagedRegister scratch, size_t size) = 0;
-
- virtual void MemoryBarrier(ManagedRegister scratch) = 0;
-
- // Sign extension
- virtual void SignExtend(ManagedRegister mreg, size_t size) = 0;
-
- // Zero extension
- virtual void ZeroExtend(ManagedRegister mreg, size_t size) = 0;
-
- // Exploit fast access in managed code to Thread::Current()
- virtual void GetCurrentThread(ManagedRegister tr) = 0;
- virtual void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) = 0;
-
- // Set up out_reg to hold a Object** into the handle scope, or to be null if the
- // value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the handle scope entry to see if the value is
- // null.
- virtual void CreateHandleScopeEntry(ManagedRegister out_reg,
- FrameOffset handlescope_offset,
- ManagedRegister in_reg,
- bool null_allowed) = 0;
-
- // Set up out_off to hold a Object** into the handle scope, or to be null if the
- // value is null and null_allowed.
- virtual void CreateHandleScopeEntry(FrameOffset out_off,
- FrameOffset handlescope_offset,
- ManagedRegister scratch,
- bool null_allowed) = 0;
-
- // src holds a handle scope entry (Object**) load this into dst
- virtual void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) = 0;
-
- // Heap::VerifyObject on src. In some cases (such as a reference to this) we
- // know that src may not be null.
- virtual void VerifyObject(ManagedRegister src, bool could_be_null) = 0;
- virtual void VerifyObject(FrameOffset src, bool could_be_null) = 0;
-
- // Call to address held at [base+offset]
- virtual void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) = 0;
- virtual void Call(FrameOffset base, Offset offset, ManagedRegister scratch) = 0;
- virtual void CallFromThread32(ThreadOffset32 offset, ManagedRegister scratch);
- virtual void CallFromThread64(ThreadOffset64 offset, ManagedRegister scratch);
-
- // Generate code to check if Thread::Current()->exception_ is non-null
- // and branch to a ExceptionSlowPath if it is.
- virtual void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) = 0;
-
virtual void Bind(Label* label) = 0;
virtual void Jump(Label* label) = 0;
@@ -525,13 +382,17 @@
*/
DebugFrameOpCodeWriterForAssembler& cfi() { return cfi_; }
- protected:
- explicit Assembler(ArenaAllocator* arena) : buffer_(arena), cfi_(this) {}
-
ArenaAllocator* GetArena() {
return buffer_.GetArena();
}
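+  // Direct access to the underlying assembler buffer.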
+ AssemblerBuffer* GetBuffer() {
+ return &buffer_;
+ }
+
+ protected:
+ explicit Assembler(ArenaAllocator* arena) : buffer_(arena), cfi_(this) {}
+
AssemblerBuffer buffer_;
DebugFrameOpCodeWriterForAssembler cfi_;
diff --git a/compiler/utils/jni_macro_assembler.cc b/compiler/utils/jni_macro_assembler.cc
new file mode 100644
index 0000000..797a98c
--- /dev/null
+++ b/compiler/utils/jni_macro_assembler.cc
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni_macro_assembler.h"
+
+#include <algorithm>
+#include <vector>
+
+#ifdef ART_ENABLE_CODEGEN_arm
+#include "arm/jni_macro_assembler_arm.h"
+#endif
+#ifdef ART_ENABLE_CODEGEN_arm64
+#include "arm64/jni_macro_assembler_arm64.h"
+#endif
+#ifdef ART_ENABLE_CODEGEN_mips
+#include "mips/assembler_mips.h"
+#endif
+#ifdef ART_ENABLE_CODEGEN_mips64
+#include "mips64/assembler_mips64.h"
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86
+#include "x86/jni_macro_assembler_x86.h"
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86_64
+#include "x86_64/jni_macro_assembler_x86_64.h"
+#endif
+#include "base/casts.h"
+#include "globals.h"
+#include "memory_region.h"
+
+namespace art {
+
+using MacroAsm32UniquePtr = std::unique_ptr<JNIMacroAssembler<PointerSize::k32>>;
+
+template <>
+MacroAsm32UniquePtr JNIMacroAssembler<PointerSize::k32>::Create(
+ ArenaAllocator* arena,
+ InstructionSet instruction_set,
+ const InstructionSetFeatures* instruction_set_features) {
+#ifndef ART_ENABLE_CODEGEN_mips
+ UNUSED(instruction_set_features);
+#endif
+
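+  // Select the 32-bit JNI macro assembler for the target instruction set.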
+ switch (instruction_set) {
+#ifdef ART_ENABLE_CODEGEN_arm
+ case kArm:
+ case kThumb2:
+ return MacroAsm32UniquePtr(new (arena) arm::ArmJNIMacroAssembler(arena, instruction_set));
+#endif
+#ifdef ART_ENABLE_CODEGEN_mips
+ case kMips:
+ return MacroAsm32UniquePtr(new (arena) mips::MipsAssembler(
+ arena,
+ instruction_set_features != nullptr
+ ? instruction_set_features->AsMipsInstructionSetFeatures()
+ : nullptr));
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86
+ case kX86:
+ return MacroAsm32UniquePtr(new (arena) x86::X86JNIMacroAssembler(arena));
+#endif
+ default:
+ LOG(FATAL) << "Unknown/unsupported 4B InstructionSet: " << instruction_set;
+ UNREACHABLE();
+ }
+}
+
+using MacroAsm64UniquePtr = std::unique_ptr<JNIMacroAssembler<PointerSize::k64>>;
+
+template <>
+MacroAsm64UniquePtr JNIMacroAssembler<PointerSize::k64>::Create(
+ ArenaAllocator* arena,
+ InstructionSet instruction_set,
+ const InstructionSetFeatures* instruction_set_features ATTRIBUTE_UNUSED) {
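+  // Select the 64-bit JNI macro assembler for the target instruction set.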
+ switch (instruction_set) {
+#ifdef ART_ENABLE_CODEGEN_arm64
+ case kArm64:
+ return MacroAsm64UniquePtr(new (arena) arm64::Arm64JNIMacroAssembler(arena));
+#endif
+#ifdef ART_ENABLE_CODEGEN_mips64
+ case kMips64:
+ return MacroAsm64UniquePtr(new (arena) mips64::Mips64Assembler(arena));
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86_64
+ case kX86_64:
+ return MacroAsm64UniquePtr(new (arena) x86_64::X86_64JNIMacroAssembler(arena));
+#endif
+ default:
+ LOG(FATAL) << "Unknown/unsupported 8B InstructionSet: " << instruction_set;
+ UNREACHABLE();
+ }
+}
+
+} // namespace art
diff --git a/compiler/utils/jni_macro_assembler.h b/compiler/utils/jni_macro_assembler.h
new file mode 100644
index 0000000..6f45bd6
--- /dev/null
+++ b/compiler/utils/jni_macro_assembler.h
@@ -0,0 +1,235 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_JNI_MACRO_ASSEMBLER_H_
+#define ART_COMPILER_UTILS_JNI_MACRO_ASSEMBLER_H_
+
+#include <vector>
+
+#include "arch/instruction_set.h"
+#include "base/arena_allocator.h"
+#include "base/arena_object.h"
+#include "base/enums.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "managed_register.h"
+#include "offsets.h"
+#include "utils/array_ref.h"
+
+namespace art {
+
+class ArenaAllocator;
+class DebugFrameOpCodeWriterForAssembler;
+class InstructionSetFeatures;
+class MemoryRegion;
+
+template <PointerSize kPointerSize>
+class JNIMacroAssembler : public DeletableArenaObject<kArenaAllocAssembler> {
+ public:
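+  // Creates the architecture-specific implementation. A minimal usage sketch
+  // (assuming a 64-bit target and an existing arena):
+  //   ArenaPool pool;
+  //   ArenaAllocator arena(&pool);
+  //   auto jni_asm = JNIMacroAssembler<PointerSize::k64>::Create(&arena, kArm64);
+  //   ...emit code...
+  //   jni_asm->FinalizeCode();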
+ static std::unique_ptr<JNIMacroAssembler<kPointerSize>> Create(
+ ArenaAllocator* arena,
+ InstructionSet instruction_set,
+ const InstructionSetFeatures* instruction_set_features = nullptr);
+
+ // Finalize the code; emit slow paths, fixup branches, add literal pool, etc.
+ virtual void FinalizeCode() = 0;
+
+ // Size of generated code
+ virtual size_t CodeSize() const = 0;
+
+ // Copy instructions out of assembly buffer into the given region of memory
+ virtual void FinalizeInstructions(const MemoryRegion& region) = 0;
+
+ // Emit code that will create an activation on the stack
+ virtual void BuildFrame(size_t frame_size,
+ ManagedRegister method_reg,
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ const ManagedRegisterEntrySpills& entry_spills) = 0;
+
+ // Emit code that will remove an activation from the stack
+ virtual void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs) = 0;
+
+ virtual void IncreaseFrameSize(size_t adjust) = 0;
+ virtual void DecreaseFrameSize(size_t adjust) = 0;
+
+ // Store routines
+ virtual void Store(FrameOffset offs, ManagedRegister src, size_t size) = 0;
+ virtual void StoreRef(FrameOffset dest, ManagedRegister src) = 0;
+ virtual void StoreRawPtr(FrameOffset dest, ManagedRegister src) = 0;
+
+ virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) = 0;
+
+ virtual void StoreStackOffsetToThread(ThreadOffset<kPointerSize> thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister scratch) = 0;
+
+ virtual void StoreStackPointerToThread(ThreadOffset<kPointerSize> thr_offs) = 0;
+
+ virtual void StoreSpanning(FrameOffset dest,
+ ManagedRegister src,
+ FrameOffset in_off,
+ ManagedRegister scratch) = 0;
+
+ // Load routines
+ virtual void Load(ManagedRegister dest, FrameOffset src, size_t size) = 0;
+
+ virtual void LoadFromThread(ManagedRegister dest,
+ ThreadOffset<kPointerSize> src,
+ size_t size) = 0;
+
+ virtual void LoadRef(ManagedRegister dest, FrameOffset src) = 0;
+ // If unpoison_reference is true and kPoisonReference is true, then we negate the read reference.
+ virtual void LoadRef(ManagedRegister dest,
+ ManagedRegister base,
+ MemberOffset offs,
+ bool unpoison_reference) = 0;
+
+ virtual void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) = 0;
+
+ virtual void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset<kPointerSize> offs) = 0;
+
+ // Copying routines
+ virtual void Move(ManagedRegister dest, ManagedRegister src, size_t size) = 0;
+
+ virtual void CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset<kPointerSize> thr_offs,
+ ManagedRegister scratch) = 0;
+
+ virtual void CopyRawPtrToThread(ThreadOffset<kPointerSize> thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister scratch) = 0;
+
+ virtual void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) = 0;
+
+ virtual void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) = 0;
+
+ virtual void Copy(FrameOffset dest,
+ ManagedRegister src_base,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) = 0;
+
+ virtual void Copy(ManagedRegister dest_base,
+ Offset dest_offset,
+ FrameOffset src,
+ ManagedRegister scratch,
+ size_t size) = 0;
+
+ virtual void Copy(FrameOffset dest,
+ FrameOffset src_base,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) = 0;
+
+ virtual void Copy(ManagedRegister dest,
+ Offset dest_offset,
+ ManagedRegister src,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) = 0;
+
+ virtual void Copy(FrameOffset dest,
+ Offset dest_offset,
+ FrameOffset src,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) = 0;
+
+ virtual void MemoryBarrier(ManagedRegister scratch) = 0;
+
+ // Sign extension
+ virtual void SignExtend(ManagedRegister mreg, size_t size) = 0;
+
+ // Zero extension
+ virtual void ZeroExtend(ManagedRegister mreg, size_t size) = 0;
+
+ // Exploit fast access in managed code to Thread::Current()
+ virtual void GetCurrentThread(ManagedRegister tr) = 0;
+ virtual void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) = 0;
+
+  // Set up out_reg to hold an Object** into the handle scope, or to be null if the
+ // value is null and null_allowed. in_reg holds a possibly stale reference
+ // that can be used to avoid loading the handle scope entry to see if the value is
+ // null.
+ virtual void CreateHandleScopeEntry(ManagedRegister out_reg,
+ FrameOffset handlescope_offset,
+ ManagedRegister in_reg,
+ bool null_allowed) = 0;
+
+  // Set up out_off to hold an Object** into the handle scope, or to be null if the
+ // value is null and null_allowed.
+ virtual void CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handlescope_offset,
+ ManagedRegister scratch,
+ bool null_allowed) = 0;
+
+  // src holds a handle scope entry (Object**); load this into dst.
+ virtual void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) = 0;
+
+ // Heap::VerifyObject on src. In some cases (such as a reference to this) we
+  // know that src cannot be null.
+ virtual void VerifyObject(ManagedRegister src, bool could_be_null) = 0;
+ virtual void VerifyObject(FrameOffset src, bool could_be_null) = 0;
+
+ // Call to address held at [base+offset]
+ virtual void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) = 0;
+ virtual void Call(FrameOffset base, Offset offset, ManagedRegister scratch) = 0;
+ virtual void CallFromThread(ThreadOffset<kPointerSize> offset, ManagedRegister scratch) = 0;
+
+ // Generate code to check if Thread::Current()->exception_ is non-null
+  // and branch to an ExceptionSlowPath if it is.
+ virtual void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) = 0;
+
+ virtual ~JNIMacroAssembler() {}
+
+ /**
+ * @brief Buffer of DWARF's Call Frame Information opcodes.
+ * @details It is used by debuggers and other tools to unwind the call stack.
+ */
+ virtual DebugFrameOpCodeWriterForAssembler& cfi() = 0;
+
+ protected:
+ explicit JNIMacroAssembler() {}
+};
+
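+// Forwards the buffer-related boilerplate (size, finalization, CFI) to a concrete assembler T.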
+template <typename T, PointerSize kPointerSize>
+class JNIMacroAssemblerFwd : public JNIMacroAssembler<kPointerSize> {
+ public:
+ void FinalizeCode() OVERRIDE {
+ asm_.FinalizeCode();
+ }
+
+ size_t CodeSize() const OVERRIDE {
+ return asm_.CodeSize();
+ }
+
+ void FinalizeInstructions(const MemoryRegion& region) OVERRIDE {
+ asm_.FinalizeInstructions(region);
+ }
+
+ DebugFrameOpCodeWriterForAssembler& cfi() OVERRIDE {
+ return asm_.cfi();
+ }
+
+ protected:
+ explicit JNIMacroAssemblerFwd(ArenaAllocator* arena) : asm_(arena) {}
+
+ T asm_;
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_JNI_MACRO_ASSEMBLER_H_
diff --git a/compiler/utils/jni_macro_assembler_test.h b/compiler/utils/jni_macro_assembler_test.h
new file mode 100644
index 0000000..829f34b
--- /dev/null
+++ b/compiler/utils/jni_macro_assembler_test.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_JNI_MACRO_ASSEMBLER_TEST_H_
+#define ART_COMPILER_UTILS_JNI_MACRO_ASSEMBLER_TEST_H_
+
+#include "jni_macro_assembler.h"
+
+#include "assembler_test_base.h"
+#include "common_runtime_test.h" // For ScratchFile
+
+#include <cstdio>
+#include <cstdlib>
+#include <fstream>
+#include <iterator>
+#include <sys/stat.h>
+
+namespace art {
+
+template<typename Ass>
+class JNIMacroAssemblerTest : public testing::Test {
+ public:
+ Ass* GetAssembler() {
+ return assembler_.get();
+ }
+
+ typedef std::string (*TestFn)(JNIMacroAssemblerTest* assembler_test, Ass* assembler);
+
+ void DriverFn(TestFn f, std::string test_name) {
+ DriverWrapper(f(this, assembler_.get()), test_name);
+ }
+
+  // This driver assumes the code has already been emitted through the assembler.
+ void DriverStr(std::string assembly_string, std::string test_name) {
+ DriverWrapper(assembly_string, test_name);
+ }
+
+ // This is intended to be run as a test.
+ bool CheckTools() {
+ return test_helper_->CheckTools();
+ }
+
+ protected:
+ explicit JNIMacroAssemblerTest() {}
+
+ void SetUp() OVERRIDE {
+ arena_.reset(new ArenaAllocator(&pool_));
+ assembler_.reset(CreateAssembler(arena_.get()));
+ test_helper_.reset(
+ new AssemblerTestInfrastructure(GetArchitectureString(),
+ GetAssemblerCmdName(),
+ GetAssemblerParameters(),
+ GetObjdumpCmdName(),
+ GetObjdumpParameters(),
+ GetDisassembleCmdName(),
+ GetDisassembleParameters(),
+ GetAssemblyHeader()));
+
+ SetUpHelpers();
+ }
+
+ void TearDown() OVERRIDE {
+ test_helper_.reset(); // Clean up the helper.
+ assembler_.reset();
+ arena_.reset();
+ }
+
+ // Override this to create the assembler with any architecture-specific arguments, e.g., CPU revision.
+ virtual Ass* CreateAssembler(ArenaAllocator* arena) {
+ return new (arena) Ass(arena);
+ }
+
+ // Override this to set up any architecture-specific things, e.g., register vectors.
+ virtual void SetUpHelpers() {}
+
+ // Get the typically used name for this architecture, e.g., aarch64, x86_64, ...
+ virtual std::string GetArchitectureString() = 0;
+
+ // Get the name of the assembler, e.g., "as" by default.
+ virtual std::string GetAssemblerCmdName() {
+ return "as";
+ }
+
+ // Switches passed to the assembler command. Default is none.
+ virtual std::string GetAssemblerParameters() {
+ return "";
+ }
+
+ // Get the name of the objdump, e.g., "objdump" by default.
+ virtual std::string GetObjdumpCmdName() {
+ return "objdump";
+ }
+
+ // Switches passed to the objdump command. Default is " -h".
+ virtual std::string GetObjdumpParameters() {
+ return " -h";
+ }
+
+ // Get the name of the disassembler, e.g., "objdump" by default.
+ virtual std::string GetDisassembleCmdName() {
+ return "objdump";
+ }
+
+ // Switches passed to the disassembler command. As the input is a raw binary, one needs to
+ // pass the architecture and such to objdump, so it's architecture-specific and there is no default.
+ virtual std::string GetDisassembleParameters() = 0;
+
+ // If the assembly file needs a header, override this in a subclass to return it.
+ virtual const char* GetAssemblyHeader() {
+ return nullptr;
+ }
+
+ private:
+ // Override this to pad the code with NOPs to a certain size if needed.
+ virtual void Pad(std::vector<uint8_t>& data ATTRIBUTE_UNUSED) {
+ }
+
+ void DriverWrapper(std::string assembly_text, std::string test_name) {
+ assembler_->FinalizeCode();
+ size_t cs = assembler_->CodeSize();
+ std::unique_ptr<std::vector<uint8_t>> data(new std::vector<uint8_t>(cs));
+ MemoryRegion code(&(*data)[0], data->size());
+ assembler_->FinalizeInstructions(code);
+ Pad(*data);
+ test_helper_->Driver(*data, assembly_text, test_name);
+ }
+
+ ArenaPool pool_;
+ std::unique_ptr<ArenaAllocator> arena_;
+ std::unique_ptr<Ass> assembler_;
+ std::unique_ptr<AssemblerTestInfrastructure> test_helper_;
+
+ DISALLOW_COPY_AND_ASSIGN(JNIMacroAssemblerTest);
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_JNI_MACRO_ASSEMBLER_TEST_H_
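
Concrete architectures are expected to subclass this fixture and fill in the pure-virtual tool hooks. A hypothetical wiring sketch, not taken from this patch — FooAssembler, the arch string, and the objdump flags below are placeholders:

// Hypothetical hook-up for a new architecture; mirrors what real subclasses
// of this fixture do. FooAssembler and the option strings are placeholders.
class FooJNIMacroAssemblerTest : public JNIMacroAssemblerTest<FooAssembler> {
 protected:
  std::string GetArchitectureString() OVERRIDE { return "foo"; }
  std::string GetDisassembleParameters() OVERRIDE {
    // Raw binaries carry no architecture info, so objdump needs it spelled out.
    return " -D -bbinary -mfoo ";
  }
};
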
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index e6b32de..8b7da3f 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -2799,27 +2799,17 @@
StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
}
-void MipsAssembler::StoreImmediateToThread32(ThreadOffset32 dest,
- uint32_t imm,
+void MipsAssembler::StoreStackOffsetToThread(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
ManagedRegister mscratch) {
MipsManagedRegister scratch = mscratch.AsMips();
CHECK(scratch.IsCoreRegister()) << scratch;
- // Is this function even referenced anywhere else in the code?
- LoadConst32(scratch.AsCoreRegister(), imm);
- StoreToOffset(kStoreWord, scratch.AsCoreRegister(), S1, dest.Int32Value());
-}
-
-void MipsAssembler::StoreStackOffsetToThread32(ThreadOffset32 thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) {
- MipsManagedRegister scratch = mscratch.AsMips();
- CHECK(scratch.IsCoreRegister()) << scratch;
Addiu32(scratch.AsCoreRegister(), SP, fr_offs.Int32Value());
StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
S1, thr_offs.Int32Value());
}
-void MipsAssembler::StoreStackPointerToThread32(ThreadOffset32 thr_offs) {
+void MipsAssembler::StoreStackPointerToThread(ThreadOffset32 thr_offs) {
StoreToOffset(kStoreWord, SP, S1, thr_offs.Int32Value());
}
@@ -2836,7 +2826,7 @@
return EmitLoad(mdest, SP, src.Int32Value(), size);
}
-void MipsAssembler::LoadFromThread32(ManagedRegister mdest, ThreadOffset32 src, size_t size) {
+void MipsAssembler::LoadFromThread(ManagedRegister mdest, ThreadOffset32 src, size_t size) {
return EmitLoad(mdest, S1, src.Int32Value(), size);
}
@@ -2864,7 +2854,7 @@
base.AsMips().AsCoreRegister(), offs.Int32Value());
}
-void MipsAssembler::LoadRawPtrFromThread32(ManagedRegister mdest, ThreadOffset32 offs) {
+void MipsAssembler::LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset32 offs) {
MipsManagedRegister dest = mdest.AsMips();
CHECK(dest.IsCoreRegister());
LoadFromOffset(kLoadWord, dest.AsCoreRegister(), S1, offs.Int32Value());
@@ -2918,9 +2908,9 @@
StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
}
-void MipsAssembler::CopyRawPtrFromThread32(FrameOffset fr_offs,
- ThreadOffset32 thr_offs,
- ManagedRegister mscratch) {
+void MipsAssembler::CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset32 thr_offs,
+ ManagedRegister mscratch) {
MipsManagedRegister scratch = mscratch.AsMips();
CHECK(scratch.IsCoreRegister()) << scratch;
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
@@ -2929,9 +2919,9 @@
SP, fr_offs.Int32Value());
}
-void MipsAssembler::CopyRawPtrToThread32(ThreadOffset32 thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) {
+void MipsAssembler::CopyRawPtrToThread(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
MipsManagedRegister scratch = mscratch.AsMips();
CHECK(scratch.IsCoreRegister()) << scratch;
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
@@ -3103,8 +3093,8 @@
// TODO: place reference map on call.
}
-void MipsAssembler::CallFromThread32(ThreadOffset32 offset ATTRIBUTE_UNUSED,
- ManagedRegister mscratch ATTRIBUTE_UNUSED) {
+void MipsAssembler::CallFromThread(ThreadOffset32 offset ATTRIBUTE_UNUSED,
+ ManagedRegister mscratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL) << "no mips implementation";
}
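
The same rename repeats across every backend in this patch: the 32/64 suffix comes off the method names because the ThreadOffset32/ThreadOffset64 parameter type already fixes the pointer width. A self-contained sketch of why the uniform name stays unambiguous (all types and values below are placeholders):

#include <cstdint>
#include <iostream>

// Placeholder offset types standing in for art's ThreadOffset32/ThreadOffset64.
struct ThreadOffset32 { int32_t value; };
struct ThreadOffset64 { int64_t value; };

// Each backend implements a single, uniformly named entry point; the offset
// type in the signature already encodes the pointer width, so no name suffix
// is needed.
struct Mips32Like {
  void StoreStackPointerToThread(ThreadOffset32 off) {
    std::cout << "32-bit store at offset " << off.value << "\n";
  }
};

struct Mips64Like {
  void StoreStackPointerToThread(ThreadOffset64 off) {
    std::cout << "64-bit store at offset " << off.value << "\n";
  }
};

int main() {
  Mips32Like{}.StoreStackPointerToThread(ThreadOffset32{160});
  Mips64Like{}.StoreStackPointerToThread(ThreadOffset64{320});
  return 0;
}
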
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 852ced6..41b6c6b 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -23,12 +23,14 @@
#include "arch/mips/instruction_set_features_mips.h"
#include "base/arena_containers.h"
+#include "base/enums.h"
#include "base/macros.h"
#include "constants_mips.h"
#include "globals.h"
#include "managed_register_mips.h"
#include "offsets.h"
#include "utils/assembler.h"
+#include "utils/jni_macro_assembler.h"
#include "utils/label.h"
namespace art {
@@ -145,7 +147,7 @@
DISALLOW_COPY_AND_ASSIGN(MipsExceptionSlowPath);
};
-class MipsAssembler FINAL : public Assembler {
+class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSize::k32> {
public:
explicit MipsAssembler(ArenaAllocator* arena,
const MipsInstructionSetFeatures* instruction_set_features = nullptr)
@@ -160,6 +162,9 @@
cfi().DelayEmittingAdvancePCs();
}
+ size_t CodeSize() const OVERRIDE { return Assembler::CodeSize(); }
+ DebugFrameOpCodeWriterForAssembler& cfi() OVERRIDE { return Assembler::cfi(); }
+
virtual ~MipsAssembler() {
for (auto& branch : branches_) {
CHECK(branch.IsResolved());
@@ -500,15 +505,11 @@
void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister mscratch) OVERRIDE;
- void StoreImmediateToThread32(ThreadOffset32 dest,
- uint32_t imm,
+ void StoreStackOffsetToThread(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
ManagedRegister mscratch) OVERRIDE;
- void StoreStackOffsetToThread32(ThreadOffset32 thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) OVERRIDE;
-
- void StoreStackPointerToThread32(ThreadOffset32 thr_offs) OVERRIDE;
+ void StoreStackPointerToThread(ThreadOffset32 thr_offs) OVERRIDE;
void StoreSpanning(FrameOffset dest,
ManagedRegister msrc,
@@ -518,7 +519,7 @@
// Load routines.
void Load(ManagedRegister mdest, FrameOffset src, size_t size) OVERRIDE;
- void LoadFromThread32(ManagedRegister mdest, ThreadOffset32 src, size_t size) OVERRIDE;
+ void LoadFromThread(ManagedRegister mdest, ThreadOffset32 src, size_t size) OVERRIDE;
void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
@@ -529,19 +530,19 @@
void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) OVERRIDE;
- void LoadRawPtrFromThread32(ManagedRegister mdest, ThreadOffset32 offs) OVERRIDE;
+ void LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset32 offs) OVERRIDE;
// Copying routines.
void Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) OVERRIDE;
- void CopyRawPtrFromThread32(FrameOffset fr_offs,
- ThreadOffset32 thr_offs,
- ManagedRegister mscratch) OVERRIDE;
-
- void CopyRawPtrToThread32(ThreadOffset32 thr_offs,
- FrameOffset fr_offs,
+ void CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset32 thr_offs,
ManagedRegister mscratch) OVERRIDE;
+ void CopyRawPtrToThread(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) OVERRIDE;
+
void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) OVERRIDE;
void Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) OVERRIDE;
@@ -617,7 +618,7 @@
// Call to address held at [base+offset].
void Call(ManagedRegister base, Offset offset, ManagedRegister mscratch) OVERRIDE;
void Call(FrameOffset base, Offset offset, ManagedRegister mscratch) OVERRIDE;
- void CallFromThread32(ThreadOffset32 offset, ManagedRegister mscratch) OVERRIDE;
+ void CallFromThread(ThreadOffset32 offset, ManagedRegister mscratch) OVERRIDE;
// Generate code to check if Thread::Current()->exception_ is non-null
// and branch to a ExceptionSlowPath if it is.
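
MipsAssembler now inherits both Assembler and JNIMacroAssembler<PointerSize::k32>, and both bases declare CodeSize() and cfi(), so the one-line forwarders added above pin down a single implementation. A self-contained miniature of that disambiguation, with placeholder names:

#include <cstddef>
#include <iostream>

// Placeholder for the concrete Assembler base with a non-virtual CodeSize().
struct AssemblerBase {
  size_t CodeSize() const { return 16; }
  virtual ~AssemblerBase() {}
};

// Placeholder for the JNIMacroAssembler side with a pure-virtual CodeSize().
struct JniIface {
  virtual size_t CodeSize() const = 0;
  virtual ~JniIface() {}
};

// Both bases declare CodeSize(); the forwarding override both satisfies the
// interface and removes the ambiguity, as the MipsAssembler forwarders do.
struct CombinedAsm : AssemblerBase, JniIface {
  size_t CodeSize() const override { return AssemblerBase::CodeSize(); }
};

int main() {
  CombinedAsm a;
  std::cout << a.CodeSize() << "\n";  // prints 16 via the forwarder
  return 0;
}
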
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index 3fd77a0..a2621cb 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -2115,16 +2115,16 @@
StoreToOffset(kStoreWord, scratch.AsGpuRegister(), SP, dest.Int32Value());
}
-void Mips64Assembler::StoreStackOffsetToThread64(ThreadOffset64 thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) {
+void Mips64Assembler::StoreStackOffsetToThread(ThreadOffset64 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
Mips64ManagedRegister scratch = mscratch.AsMips64();
CHECK(scratch.IsGpuRegister()) << scratch;
Daddiu64(scratch.AsGpuRegister(), SP, fr_offs.Int32Value());
StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), S1, thr_offs.Int32Value());
}
-void Mips64Assembler::StoreStackPointerToThread64(ThreadOffset64 thr_offs) {
+void Mips64Assembler::StoreStackPointerToThread(ThreadOffset64 thr_offs) {
StoreToOffset(kStoreDoubleword, SP, S1, thr_offs.Int32Value());
}
@@ -2141,7 +2141,7 @@
return EmitLoad(mdest, SP, src.Int32Value(), size);
}
-void Mips64Assembler::LoadFromThread64(ManagedRegister mdest, ThreadOffset64 src, size_t size) {
+void Mips64Assembler::LoadFromThread(ManagedRegister mdest, ThreadOffset64 src, size_t size) {
return EmitLoad(mdest, S1, src.Int32Value(), size);
}
@@ -2174,7 +2174,7 @@
base.AsMips64().AsGpuRegister(), offs.Int32Value());
}
-void Mips64Assembler::LoadRawPtrFromThread64(ManagedRegister mdest, ThreadOffset64 offs) {
+void Mips64Assembler::LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset64 offs) {
Mips64ManagedRegister dest = mdest.AsMips64();
CHECK(dest.IsGpuRegister());
LoadFromOffset(kLoadDoubleword, dest.AsGpuRegister(), S1, offs.Int32Value());
@@ -2218,18 +2218,18 @@
StoreToOffset(kStoreWord, scratch.AsGpuRegister(), SP, dest.Int32Value());
}
-void Mips64Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs,
- ThreadOffset64 thr_offs,
- ManagedRegister mscratch) {
+void Mips64Assembler::CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset64 thr_offs,
+ ManagedRegister mscratch) {
Mips64ManagedRegister scratch = mscratch.AsMips64();
CHECK(scratch.IsGpuRegister()) << scratch;
LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(), S1, thr_offs.Int32Value());
StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, fr_offs.Int32Value());
}
-void Mips64Assembler::CopyRawPtrToThread64(ThreadOffset64 thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) {
+void Mips64Assembler::CopyRawPtrToThread(ThreadOffset64 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
Mips64ManagedRegister scratch = mscratch.AsMips64();
CHECK(scratch.IsGpuRegister()) << scratch;
LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
@@ -2431,8 +2431,8 @@
// TODO: place reference map on call
}
-void Mips64Assembler::CallFromThread64(ThreadOffset64 offset ATTRIBUTE_UNUSED,
- ManagedRegister mscratch ATTRIBUTE_UNUSED) {
+void Mips64Assembler::CallFromThread(ThreadOffset64 offset ATTRIBUTE_UNUSED,
+ ManagedRegister mscratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL) << "No MIPS64 implementation";
}
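
CallFromThread keeps the fail-fast stub pattern: arguments are marked unused and the method aborts rather than silently emitting wrong code. A minimal sketch of the idea in portable C++, where [[maybe_unused]] approximates ART's ATTRIBUTE_UNUSED macro:

#include <cstdio>
#include <cstdlib>

// [[maybe_unused]] stands in for ATTRIBUTE_UNUSED in this sketch.
void CallFromThread([[maybe_unused]] long offset) {
  // Fail fast: an audible abort beats silently emitting wrong code.
  std::fprintf(stderr, "UNIMPLEMENTED: no mips64 implementation\n");
  std::abort();
}

int main() {
  // CallFromThread(0);  // would abort by design, so it stays commented out
  return 0;
}
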
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index 1ad05b0..a7d350c 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -20,12 +20,14 @@
#include <utility>
#include <vector>
+#include "base/enums.h"
#include "base/macros.h"
#include "constants_mips64.h"
#include "globals.h"
#include "managed_register_mips64.h"
#include "offsets.h"
#include "utils/assembler.h"
+#include "utils/jni_macro_assembler.h"
#include "utils/label.h"
namespace art {
@@ -100,7 +102,7 @@
DISALLOW_COPY_AND_ASSIGN(Mips64ExceptionSlowPath);
};
-class Mips64Assembler FINAL : public Assembler {
+class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<PointerSize::k64> {
public:
explicit Mips64Assembler(ArenaAllocator* arena)
: Assembler(arena),
@@ -118,6 +120,9 @@
}
}
+ size_t CodeSize() const OVERRIDE { return Assembler::CodeSize(); }
+ DebugFrameOpCodeWriterForAssembler& cfi() OVERRIDE { return Assembler::cfi(); }
+
// Emit Machine Instructions.
void Addu(GpuRegister rd, GpuRegister rs, GpuRegister rt);
void Addiu(GpuRegister rt, GpuRegister rs, uint16_t imm16);
@@ -383,11 +388,11 @@
void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister mscratch) OVERRIDE;
- void StoreStackOffsetToThread64(ThreadOffset64 thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) OVERRIDE;
+ void StoreStackOffsetToThread(ThreadOffset64 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) OVERRIDE;
- void StoreStackPointerToThread64(ThreadOffset64 thr_offs) OVERRIDE;
+ void StoreStackPointerToThread(ThreadOffset64 thr_offs) OVERRIDE;
void StoreSpanning(FrameOffset dest, ManagedRegister msrc, FrameOffset in_off,
ManagedRegister mscratch) OVERRIDE;
@@ -395,7 +400,7 @@
// Load routines.
void Load(ManagedRegister mdest, FrameOffset src, size_t size) OVERRIDE;
- void LoadFromThread64(ManagedRegister mdest, ThreadOffset64 src, size_t size) OVERRIDE;
+ void LoadFromThread(ManagedRegister mdest, ThreadOffset64 src, size_t size) OVERRIDE;
void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
@@ -404,19 +409,19 @@
void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) OVERRIDE;
- void LoadRawPtrFromThread64(ManagedRegister mdest, ThreadOffset64 offs) OVERRIDE;
+ void LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset64 offs) OVERRIDE;
// Copying routines.
void Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) OVERRIDE;
- void CopyRawPtrFromThread64(FrameOffset fr_offs,
- ThreadOffset64 thr_offs,
- ManagedRegister mscratch) OVERRIDE;
-
- void CopyRawPtrToThread64(ThreadOffset64 thr_offs,
- FrameOffset fr_offs,
+ void CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset64 thr_offs,
ManagedRegister mscratch) OVERRIDE;
+ void CopyRawPtrToThread(ThreadOffset64 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) OVERRIDE;
+
void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) OVERRIDE;
void Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) OVERRIDE;
@@ -471,7 +476,7 @@
// Call to address held at [base+offset].
void Call(ManagedRegister base, Offset offset, ManagedRegister mscratch) OVERRIDE;
void Call(FrameOffset base, Offset offset, ManagedRegister mscratch) OVERRIDE;
- void CallFromThread64(ThreadOffset64 offset, ManagedRegister mscratch) OVERRIDE;
+ void CallFromThread(ThreadOffset64 offset, ManagedRegister mscratch) OVERRIDE;
// Generate code to check if Thread::Current()->exception_ is non-null
// and branch to a ExceptionSlowPath if it is.
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index bd5fc40..f1a9915 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -1943,489 +1943,6 @@
EmitOperand(reg_or_opcode, operand);
}
-static dwarf::Reg DWARFReg(Register reg) {
- return dwarf::Reg::X86Core(static_cast<int>(reg));
-}
-
-constexpr size_t kFramePointerSize = 4;
-
-void X86Assembler::BuildFrame(size_t frame_size,
- ManagedRegister method_reg,
- ArrayRef<const ManagedRegister> spill_regs,
- const ManagedRegisterEntrySpills& entry_spills) {
- DCHECK_EQ(buffer_.Size(), 0U); // Nothing emitted yet.
- cfi_.SetCurrentCFAOffset(4); // Return address on stack.
- CHECK_ALIGNED(frame_size, kStackAlignment);
- int gpr_count = 0;
- for (int i = spill_regs.size() - 1; i >= 0; --i) {
- Register spill = spill_regs[i].AsX86().AsCpuRegister();
- pushl(spill);
- gpr_count++;
- cfi_.AdjustCFAOffset(kFramePointerSize);
- cfi_.RelOffset(DWARFReg(spill), 0);
- }
-
- // return address then method on stack.
- int32_t adjust = frame_size - gpr_count * kFramePointerSize -
- kFramePointerSize /*method*/ -
- kFramePointerSize /*return address*/;
- addl(ESP, Immediate(-adjust));
- cfi_.AdjustCFAOffset(adjust);
- pushl(method_reg.AsX86().AsCpuRegister());
- cfi_.AdjustCFAOffset(kFramePointerSize);
- DCHECK_EQ(static_cast<size_t>(cfi_.GetCurrentCFAOffset()), frame_size);
-
- for (size_t i = 0; i < entry_spills.size(); ++i) {
- ManagedRegisterSpill spill = entry_spills.at(i);
- if (spill.AsX86().IsCpuRegister()) {
- int offset = frame_size + spill.getSpillOffset();
- movl(Address(ESP, offset), spill.AsX86().AsCpuRegister());
- } else {
- DCHECK(spill.AsX86().IsXmmRegister());
- if (spill.getSize() == 8) {
- movsd(Address(ESP, frame_size + spill.getSpillOffset()), spill.AsX86().AsXmmRegister());
- } else {
- CHECK_EQ(spill.getSize(), 4);
- movss(Address(ESP, frame_size + spill.getSpillOffset()), spill.AsX86().AsXmmRegister());
- }
- }
- }
-}
-
-void X86Assembler::RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> spill_regs) {
- CHECK_ALIGNED(frame_size, kStackAlignment);
- cfi_.RememberState();
- // -kFramePointerSize for ArtMethod*.
- int adjust = frame_size - spill_regs.size() * kFramePointerSize - kFramePointerSize;
- addl(ESP, Immediate(adjust));
- cfi_.AdjustCFAOffset(-adjust);
- for (size_t i = 0; i < spill_regs.size(); ++i) {
- Register spill = spill_regs[i].AsX86().AsCpuRegister();
- popl(spill);
- cfi_.AdjustCFAOffset(-static_cast<int>(kFramePointerSize));
- cfi_.Restore(DWARFReg(spill));
- }
- ret();
- // The CFI should be restored for any code that follows the exit block.
- cfi_.RestoreState();
- cfi_.DefCFAOffset(frame_size);
-}
-
-void X86Assembler::IncreaseFrameSize(size_t adjust) {
- CHECK_ALIGNED(adjust, kStackAlignment);
- addl(ESP, Immediate(-adjust));
- cfi_.AdjustCFAOffset(adjust);
-}
-
-void X86Assembler::DecreaseFrameSize(size_t adjust) {
- CHECK_ALIGNED(adjust, kStackAlignment);
- addl(ESP, Immediate(adjust));
- cfi_.AdjustCFAOffset(-adjust);
-}
-
-void X86Assembler::Store(FrameOffset offs, ManagedRegister msrc, size_t size) {
- X86ManagedRegister src = msrc.AsX86();
- if (src.IsNoRegister()) {
- CHECK_EQ(0u, size);
- } else if (src.IsCpuRegister()) {
- CHECK_EQ(4u, size);
- movl(Address(ESP, offs), src.AsCpuRegister());
- } else if (src.IsRegisterPair()) {
- CHECK_EQ(8u, size);
- movl(Address(ESP, offs), src.AsRegisterPairLow());
- movl(Address(ESP, FrameOffset(offs.Int32Value()+4)),
- src.AsRegisterPairHigh());
- } else if (src.IsX87Register()) {
- if (size == 4) {
- fstps(Address(ESP, offs));
- } else {
- fstpl(Address(ESP, offs));
- }
- } else {
- CHECK(src.IsXmmRegister());
- if (size == 4) {
- movss(Address(ESP, offs), src.AsXmmRegister());
- } else {
- movsd(Address(ESP, offs), src.AsXmmRegister());
- }
- }
-}
-
-void X86Assembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
- X86ManagedRegister src = msrc.AsX86();
- CHECK(src.IsCpuRegister());
- movl(Address(ESP, dest), src.AsCpuRegister());
-}
-
-void X86Assembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
- X86ManagedRegister src = msrc.AsX86();
- CHECK(src.IsCpuRegister());
- movl(Address(ESP, dest), src.AsCpuRegister());
-}
-
-void X86Assembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
- ManagedRegister) {
- movl(Address(ESP, dest), Immediate(imm));
-}
-
-void X86Assembler::StoreImmediateToThread32(ThreadOffset32 dest, uint32_t imm, ManagedRegister) {
- fs()->movl(Address::Absolute(dest), Immediate(imm));
-}
-
-void X86Assembler::StoreStackOffsetToThread32(ThreadOffset32 thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) {
- X86ManagedRegister scratch = mscratch.AsX86();
- CHECK(scratch.IsCpuRegister());
- leal(scratch.AsCpuRegister(), Address(ESP, fr_offs));
- fs()->movl(Address::Absolute(thr_offs), scratch.AsCpuRegister());
-}
-
-void X86Assembler::StoreStackPointerToThread32(ThreadOffset32 thr_offs) {
- fs()->movl(Address::Absolute(thr_offs), ESP);
-}
-
-void X86Assembler::StoreSpanning(FrameOffset /*dst*/, ManagedRegister /*src*/,
- FrameOffset /*in_off*/, ManagedRegister /*scratch*/) {
- UNIMPLEMENTED(FATAL); // this case only currently exists for ARM
-}
-
-void X86Assembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
- X86ManagedRegister dest = mdest.AsX86();
- if (dest.IsNoRegister()) {
- CHECK_EQ(0u, size);
- } else if (dest.IsCpuRegister()) {
- CHECK_EQ(4u, size);
- movl(dest.AsCpuRegister(), Address(ESP, src));
- } else if (dest.IsRegisterPair()) {
- CHECK_EQ(8u, size);
- movl(dest.AsRegisterPairLow(), Address(ESP, src));
- movl(dest.AsRegisterPairHigh(), Address(ESP, FrameOffset(src.Int32Value()+4)));
- } else if (dest.IsX87Register()) {
- if (size == 4) {
- flds(Address(ESP, src));
- } else {
- fldl(Address(ESP, src));
- }
- } else {
- CHECK(dest.IsXmmRegister());
- if (size == 4) {
- movss(dest.AsXmmRegister(), Address(ESP, src));
- } else {
- movsd(dest.AsXmmRegister(), Address(ESP, src));
- }
- }
-}
-
-void X86Assembler::LoadFromThread32(ManagedRegister mdest, ThreadOffset32 src, size_t size) {
- X86ManagedRegister dest = mdest.AsX86();
- if (dest.IsNoRegister()) {
- CHECK_EQ(0u, size);
- } else if (dest.IsCpuRegister()) {
- CHECK_EQ(4u, size);
- fs()->movl(dest.AsCpuRegister(), Address::Absolute(src));
- } else if (dest.IsRegisterPair()) {
- CHECK_EQ(8u, size);
- fs()->movl(dest.AsRegisterPairLow(), Address::Absolute(src));
- fs()->movl(dest.AsRegisterPairHigh(), Address::Absolute(ThreadOffset32(src.Int32Value()+4)));
- } else if (dest.IsX87Register()) {
- if (size == 4) {
- fs()->flds(Address::Absolute(src));
- } else {
- fs()->fldl(Address::Absolute(src));
- }
- } else {
- CHECK(dest.IsXmmRegister());
- if (size == 4) {
- fs()->movss(dest.AsXmmRegister(), Address::Absolute(src));
- } else {
- fs()->movsd(dest.AsXmmRegister(), Address::Absolute(src));
- }
- }
-}
-
-void X86Assembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
- X86ManagedRegister dest = mdest.AsX86();
- CHECK(dest.IsCpuRegister());
- movl(dest.AsCpuRegister(), Address(ESP, src));
-}
-
-void X86Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
- bool unpoison_reference) {
- X86ManagedRegister dest = mdest.AsX86();
- CHECK(dest.IsCpuRegister() && dest.IsCpuRegister());
- movl(dest.AsCpuRegister(), Address(base.AsX86().AsCpuRegister(), offs));
- if (unpoison_reference) {
- MaybeUnpoisonHeapReference(dest.AsCpuRegister());
- }
-}
-
-void X86Assembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
- Offset offs) {
- X86ManagedRegister dest = mdest.AsX86();
- CHECK(dest.IsCpuRegister() && dest.IsCpuRegister());
- movl(dest.AsCpuRegister(), Address(base.AsX86().AsCpuRegister(), offs));
-}
-
-void X86Assembler::LoadRawPtrFromThread32(ManagedRegister mdest,
- ThreadOffset32 offs) {
- X86ManagedRegister dest = mdest.AsX86();
- CHECK(dest.IsCpuRegister());
- fs()->movl(dest.AsCpuRegister(), Address::Absolute(offs));
-}
-
-void X86Assembler::SignExtend(ManagedRegister mreg, size_t size) {
- X86ManagedRegister reg = mreg.AsX86();
- CHECK(size == 1 || size == 2) << size;
- CHECK(reg.IsCpuRegister()) << reg;
- if (size == 1) {
- movsxb(reg.AsCpuRegister(), reg.AsByteRegister());
- } else {
- movsxw(reg.AsCpuRegister(), reg.AsCpuRegister());
- }
-}
-
-void X86Assembler::ZeroExtend(ManagedRegister mreg, size_t size) {
- X86ManagedRegister reg = mreg.AsX86();
- CHECK(size == 1 || size == 2) << size;
- CHECK(reg.IsCpuRegister()) << reg;
- if (size == 1) {
- movzxb(reg.AsCpuRegister(), reg.AsByteRegister());
- } else {
- movzxw(reg.AsCpuRegister(), reg.AsCpuRegister());
- }
-}
-
-void X86Assembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) {
- X86ManagedRegister dest = mdest.AsX86();
- X86ManagedRegister src = msrc.AsX86();
- if (!dest.Equals(src)) {
- if (dest.IsCpuRegister() && src.IsCpuRegister()) {
- movl(dest.AsCpuRegister(), src.AsCpuRegister());
- } else if (src.IsX87Register() && dest.IsXmmRegister()) {
- // Pass via stack and pop X87 register
- subl(ESP, Immediate(16));
- if (size == 4) {
- CHECK_EQ(src.AsX87Register(), ST0);
- fstps(Address(ESP, 0));
- movss(dest.AsXmmRegister(), Address(ESP, 0));
- } else {
- CHECK_EQ(src.AsX87Register(), ST0);
- fstpl(Address(ESP, 0));
- movsd(dest.AsXmmRegister(), Address(ESP, 0));
- }
- addl(ESP, Immediate(16));
- } else {
- // TODO: x87, SSE
- UNIMPLEMENTED(FATAL) << ": Move " << dest << ", " << src;
- }
- }
-}
-
-void X86Assembler::CopyRef(FrameOffset dest, FrameOffset src,
- ManagedRegister mscratch) {
- X86ManagedRegister scratch = mscratch.AsX86();
- CHECK(scratch.IsCpuRegister());
- movl(scratch.AsCpuRegister(), Address(ESP, src));
- movl(Address(ESP, dest), scratch.AsCpuRegister());
-}
-
-void X86Assembler::CopyRawPtrFromThread32(FrameOffset fr_offs,
- ThreadOffset32 thr_offs,
- ManagedRegister mscratch) {
- X86ManagedRegister scratch = mscratch.AsX86();
- CHECK(scratch.IsCpuRegister());
- fs()->movl(scratch.AsCpuRegister(), Address::Absolute(thr_offs));
- Store(fr_offs, scratch, 4);
-}
-
-void X86Assembler::CopyRawPtrToThread32(ThreadOffset32 thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) {
- X86ManagedRegister scratch = mscratch.AsX86();
- CHECK(scratch.IsCpuRegister());
- Load(scratch, fr_offs, 4);
- fs()->movl(Address::Absolute(thr_offs), scratch.AsCpuRegister());
-}
-
-void X86Assembler::Copy(FrameOffset dest, FrameOffset src,
- ManagedRegister mscratch,
- size_t size) {
- X86ManagedRegister scratch = mscratch.AsX86();
- if (scratch.IsCpuRegister() && size == 8) {
- Load(scratch, src, 4);
- Store(dest, scratch, 4);
- Load(scratch, FrameOffset(src.Int32Value() + 4), 4);
- Store(FrameOffset(dest.Int32Value() + 4), scratch, 4);
- } else {
- Load(scratch, src, size);
- Store(dest, scratch, size);
- }
-}
-
-void X86Assembler::Copy(FrameOffset /*dst*/, ManagedRegister /*src_base*/, Offset /*src_offset*/,
- ManagedRegister /*scratch*/, size_t /*size*/) {
- UNIMPLEMENTED(FATAL);
-}
-
-void X86Assembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
- ManagedRegister scratch, size_t size) {
- CHECK(scratch.IsNoRegister());
- CHECK_EQ(size, 4u);
- pushl(Address(ESP, src));
- popl(Address(dest_base.AsX86().AsCpuRegister(), dest_offset));
-}
-
-void X86Assembler::Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset,
- ManagedRegister mscratch, size_t size) {
- Register scratch = mscratch.AsX86().AsCpuRegister();
- CHECK_EQ(size, 4u);
- movl(scratch, Address(ESP, src_base));
- movl(scratch, Address(scratch, src_offset));
- movl(Address(ESP, dest), scratch);
-}
-
-void X86Assembler::Copy(ManagedRegister dest, Offset dest_offset,
- ManagedRegister src, Offset src_offset,
- ManagedRegister scratch, size_t size) {
- CHECK_EQ(size, 4u);
- CHECK(scratch.IsNoRegister());
- pushl(Address(src.AsX86().AsCpuRegister(), src_offset));
- popl(Address(dest.AsX86().AsCpuRegister(), dest_offset));
-}
-
-void X86Assembler::Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
- ManagedRegister mscratch, size_t size) {
- Register scratch = mscratch.AsX86().AsCpuRegister();
- CHECK_EQ(size, 4u);
- CHECK_EQ(dest.Int32Value(), src.Int32Value());
- movl(scratch, Address(ESP, src));
- pushl(Address(scratch, src_offset));
- popl(Address(scratch, dest_offset));
-}
-
-void X86Assembler::MemoryBarrier(ManagedRegister) {
- mfence();
-}
-
-void X86Assembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
- FrameOffset handle_scope_offset,
- ManagedRegister min_reg, bool null_allowed) {
- X86ManagedRegister out_reg = mout_reg.AsX86();
- X86ManagedRegister in_reg = min_reg.AsX86();
- CHECK(in_reg.IsCpuRegister());
- CHECK(out_reg.IsCpuRegister());
- VerifyObject(in_reg, null_allowed);
- if (null_allowed) {
- Label null_arg;
- if (!out_reg.Equals(in_reg)) {
- xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
- }
- testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
- j(kZero, &null_arg);
- leal(out_reg.AsCpuRegister(), Address(ESP, handle_scope_offset));
- Bind(&null_arg);
- } else {
- leal(out_reg.AsCpuRegister(), Address(ESP, handle_scope_offset));
- }
-}
-
-void X86Assembler::CreateHandleScopeEntry(FrameOffset out_off,
- FrameOffset handle_scope_offset,
- ManagedRegister mscratch,
- bool null_allowed) {
- X86ManagedRegister scratch = mscratch.AsX86();
- CHECK(scratch.IsCpuRegister());
- if (null_allowed) {
- Label null_arg;
- movl(scratch.AsCpuRegister(), Address(ESP, handle_scope_offset));
- testl(scratch.AsCpuRegister(), scratch.AsCpuRegister());
- j(kZero, &null_arg);
- leal(scratch.AsCpuRegister(), Address(ESP, handle_scope_offset));
- Bind(&null_arg);
- } else {
- leal(scratch.AsCpuRegister(), Address(ESP, handle_scope_offset));
- }
- Store(out_off, scratch, 4);
-}
-
-// Given a handle scope entry, load the associated reference.
-void X86Assembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
- ManagedRegister min_reg) {
- X86ManagedRegister out_reg = mout_reg.AsX86();
- X86ManagedRegister in_reg = min_reg.AsX86();
- CHECK(out_reg.IsCpuRegister());
- CHECK(in_reg.IsCpuRegister());
- Label null_arg;
- if (!out_reg.Equals(in_reg)) {
- xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
- }
- testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
- j(kZero, &null_arg);
- movl(out_reg.AsCpuRegister(), Address(in_reg.AsCpuRegister(), 0));
- Bind(&null_arg);
-}
-
-void X86Assembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
- // TODO: not validating references
-}
-
-void X86Assembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
- // TODO: not validating references
-}
-
-void X86Assembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister) {
- X86ManagedRegister base = mbase.AsX86();
- CHECK(base.IsCpuRegister());
- call(Address(base.AsCpuRegister(), offset.Int32Value()));
- // TODO: place reference map on call
-}
-
-void X86Assembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
- Register scratch = mscratch.AsX86().AsCpuRegister();
- movl(scratch, Address(ESP, base));
- call(Address(scratch, offset));
-}
-
-void X86Assembler::CallFromThread32(ThreadOffset32 offset, ManagedRegister /*mscratch*/) {
- fs()->call(Address::Absolute(offset));
-}
-
-void X86Assembler::GetCurrentThread(ManagedRegister tr) {
- fs()->movl(tr.AsX86().AsCpuRegister(),
- Address::Absolute(Thread::SelfOffset<kX86PointerSize>()));
-}
-
-void X86Assembler::GetCurrentThread(FrameOffset offset,
- ManagedRegister mscratch) {
- X86ManagedRegister scratch = mscratch.AsX86();
- fs()->movl(scratch.AsCpuRegister(), Address::Absolute(Thread::SelfOffset<kX86PointerSize>()));
- movl(Address(ESP, offset), scratch.AsCpuRegister());
-}
-
-void X86Assembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) {
- X86ExceptionSlowPath* slow = new (GetArena()) X86ExceptionSlowPath(stack_adjust);
- buffer_.EnqueueSlowPath(slow);
- fs()->cmpl(Address::Absolute(Thread::ExceptionOffset<kX86PointerSize>()), Immediate(0));
- j(kNotEqual, slow->Entry());
-}
-
-void X86ExceptionSlowPath::Emit(Assembler *sasm) {
- X86Assembler* sp_asm = down_cast<X86Assembler*>(sasm);
-#define __ sp_asm->
- __ Bind(&entry_);
- // Note: the return value is dead
- if (stack_adjust_ != 0) { // Fix up the frame.
- __ DecreaseFrameSize(stack_adjust_);
- }
- // Pass exception as argument in EAX
- __ fs()->movl(EAX, Address::Absolute(Thread::ExceptionOffset<kX86PointerSize>()));
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86PointerSize, pDeliverException)));
- // this call should never return
- __ int3();
-#undef __
-}
-
void X86Assembler::AddConstantArea() {
ArrayRef<const int32_t> area = constant_area_.GetBuffer();
// Generate the data for the literal area.
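
The frame code removed here re-lands in jni_macro_assembler_x86.cc below, unchanged in one key invariant: every stack adjustment is mirrored in the CFI writer, so the recorded CFA offset equals frame_size exactly when the ArtMethod* lands on the stack. A self-contained miniature of that bookkeeping (MiniCfi and the constants are placeholders):

#include <cassert>

// MiniCfi stands in for the DWARF CFI writer; only offset bookkeeping is kept.
struct MiniCfi {
  int cfa_offset = 0;
  void SetCurrentCFAOffset(int off) { cfa_offset = off; }
  void AdjustCFAOffset(int delta) { cfa_offset += delta; }
};

int main() {
  constexpr int kFramePointerSize = 4;
  constexpr int kFrameSize = 32;     // illustrative, kStackAlignment-aligned
  const int gpr_count = 1;           // pretend one callee-save was pushed
  MiniCfi cfi;
  cfi.SetCurrentCFAOffset(4);              // return address already on the stack
  cfi.AdjustCFAOffset(kFramePointerSize);  // the callee-save push
  const int adjust = kFrameSize - gpr_count * kFramePointerSize -
                     kFramePointerSize /*method*/ -
                     kFramePointerSize /*return address*/;
  cfi.AdjustCFAOffset(adjust);             // addl(ESP, -adjust)
  cfi.AdjustCFAOffset(kFramePointerSize);  // pushl of the ArtMethod*
  assert(cfi.cfa_offset == kFrameSize);    // the invariant BuildFrame DCHECKs
  return 0;
}
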
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 6d519e4..92a92a5 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -21,6 +21,7 @@
#include "base/arena_containers.h"
#include "base/bit_utils.h"
+#include "base/enums.h"
#include "base/macros.h"
#include "constants_x86.h"
#include "globals.h"
@@ -631,124 +632,6 @@
void Bind(NearLabel* label);
//
- // Overridden common assembler high-level functionality
- //
-
- // Emit code that will create an activation on the stack
- void BuildFrame(size_t frame_size,
- ManagedRegister method_reg,
- ArrayRef<const ManagedRegister> callee_save_regs,
- const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
-
- // Emit code that will remove an activation from the stack
- void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs)
- OVERRIDE;
-
- void IncreaseFrameSize(size_t adjust) OVERRIDE;
- void DecreaseFrameSize(size_t adjust) OVERRIDE;
-
- // Store routines
- void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
- void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
- void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
-
- void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
-
- void StoreImmediateToThread32(ThreadOffset32 dest, uint32_t imm, ManagedRegister scratch)
- OVERRIDE;
-
- void StoreStackOffsetToThread32(ThreadOffset32 thr_offs, FrameOffset fr_offs,
- ManagedRegister scratch) OVERRIDE;
-
- void StoreStackPointerToThread32(ThreadOffset32 thr_offs) OVERRIDE;
-
- void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
- ManagedRegister scratch) OVERRIDE;
-
- // Load routines
- void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
-
- void LoadFromThread32(ManagedRegister dest, ThreadOffset32 src, size_t size) OVERRIDE;
-
- void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
-
- void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
- bool unpoison_reference) OVERRIDE;
-
- void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
-
- void LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset32 offs) OVERRIDE;
-
- // Copying routines
- void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
-
- void CopyRawPtrFromThread32(FrameOffset fr_offs, ThreadOffset32 thr_offs,
- ManagedRegister scratch) OVERRIDE;
-
- void CopyRawPtrToThread32(ThreadOffset32 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
- OVERRIDE;
-
- void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
-
- void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
-
- void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, ManagedRegister scratch,
- size_t size) OVERRIDE;
-
- void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, ManagedRegister scratch,
- size_t size) OVERRIDE;
-
- void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, ManagedRegister scratch,
- size_t size) OVERRIDE;
-
- void Copy(ManagedRegister dest, Offset dest_offset, ManagedRegister src, Offset src_offset,
- ManagedRegister scratch, size_t size) OVERRIDE;
-
- void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
- ManagedRegister scratch, size_t size) OVERRIDE;
-
- void MemoryBarrier(ManagedRegister) OVERRIDE;
-
- // Sign extension
- void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
-
- // Zero extension
- void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
-
- // Exploit fast access in managed code to Thread::Current()
- void GetCurrentThread(ManagedRegister tr) OVERRIDE;
- void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
-
- // Set up out_reg to hold a Object** into the handle scope, or to be null if the
- // value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the handle scope entry to see if the value is
- // null.
- void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
- ManagedRegister in_reg, bool null_allowed) OVERRIDE;
-
- // Set up out_off to hold a Object** into the handle scope, or to be null if the
- // value is null and null_allowed.
- void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
- ManagedRegister scratch, bool null_allowed) OVERRIDE;
-
- // src holds a handle scope entry (Object**) load this into dst
- void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
-
- // Heap::VerifyObject on src. In some cases (such as a reference to this) we
- // know that src may not be null.
- void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
- void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
-
- // Call to address held at [base+offset]
- void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void CallFromThread32(ThreadOffset32 offset, ManagedRegister scratch) OVERRIDE;
-
- // Generate code to check if Thread::Current()->exception_ is non-null
- // and branch to a ExceptionSlowPath if it is.
- void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
-
- //
// Heap poisoning.
//
@@ -845,15 +728,6 @@
EmitUint8(0x66);
}
-// Slowpath entered when Thread::Current()->_exception is non-null
-class X86ExceptionSlowPath FINAL : public SlowPath {
- public:
- explicit X86ExceptionSlowPath(size_t stack_adjust) : stack_adjust_(stack_adjust) {}
- virtual void Emit(Assembler *sp_asm) OVERRIDE;
- private:
- const size_t stack_adjust_;
-};
-
} // namespace x86
} // namespace art
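
X86ExceptionSlowPath likewise moves out of this public header and becomes a private detail of the new .cc file. The contract of the ExceptionPoll that uses it, reduced to portable C++ — tls_exception stands in for Thread::Current()->exception_, and in ART the slow path never returns:

#include <iostream>

// tls_exception stands in for the thread's pending-exception slot.
thread_local const char* tls_exception = nullptr;

void DeliverException() {
  std::cout << "slow path: deliver " << tls_exception << "\n";
  // In ART this entrypoint never returns (the generated code ends in int3).
}

void ExceptionPoll() {
  // Mirrors the generated "cmpl exception slot, 0; j(kNotEqual, slow path)".
  if (tls_exception != nullptr) {
    DeliverException();
  }
}

int main() {
  ExceptionPoll();          // no pending exception: falls through
  tls_exception = "OOME";
  ExceptionPoll();          // pending exception: takes the slow path
  return 0;
}
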
diff --git a/compiler/utils/x86/jni_macro_assembler_x86.cc b/compiler/utils/x86/jni_macro_assembler_x86.cc
new file mode 100644
index 0000000..77af885
--- /dev/null
+++ b/compiler/utils/x86/jni_macro_assembler_x86.cc
@@ -0,0 +1,541 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni_macro_assembler_x86.h"
+
+#include "utils/assembler.h"
+#include "base/casts.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "thread.h"
+
+namespace art {
+namespace x86 {
+
+// Slowpath entered when Thread::Current()->exception_ is non-null.
+class X86ExceptionSlowPath FINAL : public SlowPath {
+ public:
+ explicit X86ExceptionSlowPath(size_t stack_adjust) : stack_adjust_(stack_adjust) {}
+ virtual void Emit(Assembler *sp_asm) OVERRIDE;
+ private:
+ const size_t stack_adjust_;
+};
+
+static dwarf::Reg DWARFReg(Register reg) {
+ return dwarf::Reg::X86Core(static_cast<int>(reg));
+}
+
+constexpr size_t kFramePointerSize = 4;
+
+#define __ asm_.
+
+void X86JNIMacroAssembler::BuildFrame(size_t frame_size,
+ ManagedRegister method_reg,
+ ArrayRef<const ManagedRegister> spill_regs,
+ const ManagedRegisterEntrySpills& entry_spills) {
+ DCHECK_EQ(CodeSize(), 0U); // Nothing emitted yet.
+ cfi().SetCurrentCFAOffset(4); // Return address on stack.
+ CHECK_ALIGNED(frame_size, kStackAlignment);
+ int gpr_count = 0;
+ for (int i = spill_regs.size() - 1; i >= 0; --i) {
+ Register spill = spill_regs[i].AsX86().AsCpuRegister();
+ __ pushl(spill);
+ gpr_count++;
+ cfi().AdjustCFAOffset(kFramePointerSize);
+ cfi().RelOffset(DWARFReg(spill), 0);
+ }
+
+ // return address then method on stack.
+ int32_t adjust = frame_size - gpr_count * kFramePointerSize -
+ kFramePointerSize /*method*/ -
+ kFramePointerSize /*return address*/;
+ __ addl(ESP, Immediate(-adjust));
+ cfi().AdjustCFAOffset(adjust);
+ __ pushl(method_reg.AsX86().AsCpuRegister());
+ cfi().AdjustCFAOffset(kFramePointerSize);
+ DCHECK_EQ(static_cast<size_t>(cfi().GetCurrentCFAOffset()), frame_size);
+
+ for (size_t i = 0; i < entry_spills.size(); ++i) {
+ ManagedRegisterSpill spill = entry_spills.at(i);
+ if (spill.AsX86().IsCpuRegister()) {
+ int offset = frame_size + spill.getSpillOffset();
+ __ movl(Address(ESP, offset), spill.AsX86().AsCpuRegister());
+ } else {
+ DCHECK(spill.AsX86().IsXmmRegister());
+ if (spill.getSize() == 8) {
+ __ movsd(Address(ESP, frame_size + spill.getSpillOffset()), spill.AsX86().AsXmmRegister());
+ } else {
+ CHECK_EQ(spill.getSize(), 4);
+ __ movss(Address(ESP, frame_size + spill.getSpillOffset()), spill.AsX86().AsXmmRegister());
+ }
+ }
+ }
+}
+
+void X86JNIMacroAssembler::RemoveFrame(size_t frame_size,
+ ArrayRef<const ManagedRegister> spill_regs) {
+ CHECK_ALIGNED(frame_size, kStackAlignment);
+ cfi().RememberState();
+ // -kFramePointerSize for ArtMethod*.
+ int adjust = frame_size - spill_regs.size() * kFramePointerSize - kFramePointerSize;
+ __ addl(ESP, Immediate(adjust));
+ cfi().AdjustCFAOffset(-adjust);
+ for (size_t i = 0; i < spill_regs.size(); ++i) {
+ Register spill = spill_regs[i].AsX86().AsCpuRegister();
+ __ popl(spill);
+ cfi().AdjustCFAOffset(-static_cast<int>(kFramePointerSize));
+ cfi().Restore(DWARFReg(spill));
+ }
+ __ ret();
+ // The CFI should be restored for any code that follows the exit block.
+ cfi().RestoreState();
+ cfi().DefCFAOffset(frame_size);
+}
+
+void X86JNIMacroAssembler::IncreaseFrameSize(size_t adjust) {
+ CHECK_ALIGNED(adjust, kStackAlignment);
+ __ addl(ESP, Immediate(-adjust));
+ cfi().AdjustCFAOffset(adjust);
+}
+
+static void DecreaseFrameSizeImpl(X86Assembler* assembler, size_t adjust) {
+ CHECK_ALIGNED(adjust, kStackAlignment);
+ assembler->addl(ESP, Immediate(adjust));
+ assembler->cfi().AdjustCFAOffset(-adjust);
+}
+
+void X86JNIMacroAssembler::DecreaseFrameSize(size_t adjust) {
+ DecreaseFrameSizeImpl(&asm_, adjust);
+}
+
+void X86JNIMacroAssembler::Store(FrameOffset offs, ManagedRegister msrc, size_t size) {
+ X86ManagedRegister src = msrc.AsX86();
+ if (src.IsNoRegister()) {
+ CHECK_EQ(0u, size);
+ } else if (src.IsCpuRegister()) {
+ CHECK_EQ(4u, size);
+ __ movl(Address(ESP, offs), src.AsCpuRegister());
+ } else if (src.IsRegisterPair()) {
+ CHECK_EQ(8u, size);
+ __ movl(Address(ESP, offs), src.AsRegisterPairLow());
+ __ movl(Address(ESP, FrameOffset(offs.Int32Value()+4)), src.AsRegisterPairHigh());
+ } else if (src.IsX87Register()) {
+ if (size == 4) {
+ __ fstps(Address(ESP, offs));
+ } else {
+ __ fstpl(Address(ESP, offs));
+ }
+ } else {
+ CHECK(src.IsXmmRegister());
+ if (size == 4) {
+ __ movss(Address(ESP, offs), src.AsXmmRegister());
+ } else {
+ __ movsd(Address(ESP, offs), src.AsXmmRegister());
+ }
+ }
+}
+
+void X86JNIMacroAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
+ X86ManagedRegister src = msrc.AsX86();
+ CHECK(src.IsCpuRegister());
+ __ movl(Address(ESP, dest), src.AsCpuRegister());
+}
+
+void X86JNIMacroAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
+ X86ManagedRegister src = msrc.AsX86();
+ CHECK(src.IsCpuRegister());
+ __ movl(Address(ESP, dest), src.AsCpuRegister());
+}
+
+void X86JNIMacroAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister) {
+ __ movl(Address(ESP, dest), Immediate(imm));
+}
+
+void X86JNIMacroAssembler::StoreStackOffsetToThread(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
+ X86ManagedRegister scratch = mscratch.AsX86();
+ CHECK(scratch.IsCpuRegister());
+ __ leal(scratch.AsCpuRegister(), Address(ESP, fr_offs));
+ __ fs()->movl(Address::Absolute(thr_offs), scratch.AsCpuRegister());
+}
+
+void X86JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset32 thr_offs) {
+ __ fs()->movl(Address::Absolute(thr_offs), ESP);
+}
+
+void X86JNIMacroAssembler::StoreSpanning(FrameOffset /*dst*/,
+ ManagedRegister /*src*/,
+ FrameOffset /*in_off*/,
+ ManagedRegister /*scratch*/) {
+ UNIMPLEMENTED(FATAL); // this case only currently exists for ARM
+}
+
+void X86JNIMacroAssembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
+ X86ManagedRegister dest = mdest.AsX86();
+ if (dest.IsNoRegister()) {
+ CHECK_EQ(0u, size);
+ } else if (dest.IsCpuRegister()) {
+ CHECK_EQ(4u, size);
+ __ movl(dest.AsCpuRegister(), Address(ESP, src));
+ } else if (dest.IsRegisterPair()) {
+ CHECK_EQ(8u, size);
+ __ movl(dest.AsRegisterPairLow(), Address(ESP, src));
+ __ movl(dest.AsRegisterPairHigh(), Address(ESP, FrameOffset(src.Int32Value()+4)));
+ } else if (dest.IsX87Register()) {
+ if (size == 4) {
+ __ flds(Address(ESP, src));
+ } else {
+ __ fldl(Address(ESP, src));
+ }
+ } else {
+ CHECK(dest.IsXmmRegister());
+ if (size == 4) {
+ __ movss(dest.AsXmmRegister(), Address(ESP, src));
+ } else {
+ __ movsd(dest.AsXmmRegister(), Address(ESP, src));
+ }
+ }
+}
+
+void X86JNIMacroAssembler::LoadFromThread(ManagedRegister mdest, ThreadOffset32 src, size_t size) {
+ X86ManagedRegister dest = mdest.AsX86();
+ if (dest.IsNoRegister()) {
+ CHECK_EQ(0u, size);
+ } else if (dest.IsCpuRegister()) {
+ CHECK_EQ(4u, size);
+ __ fs()->movl(dest.AsCpuRegister(), Address::Absolute(src));
+ } else if (dest.IsRegisterPair()) {
+ CHECK_EQ(8u, size);
+ __ fs()->movl(dest.AsRegisterPairLow(), Address::Absolute(src));
+ __ fs()->movl(dest.AsRegisterPairHigh(), Address::Absolute(ThreadOffset32(src.Int32Value()+4)));
+ } else if (dest.IsX87Register()) {
+ if (size == 4) {
+ __ fs()->flds(Address::Absolute(src));
+ } else {
+ __ fs()->fldl(Address::Absolute(src));
+ }
+ } else {
+ CHECK(dest.IsXmmRegister());
+ if (size == 4) {
+ __ fs()->movss(dest.AsXmmRegister(), Address::Absolute(src));
+ } else {
+ __ fs()->movsd(dest.AsXmmRegister(), Address::Absolute(src));
+ }
+ }
+}
+
+void X86JNIMacroAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
+ X86ManagedRegister dest = mdest.AsX86();
+ CHECK(dest.IsCpuRegister());
+ __ movl(dest.AsCpuRegister(), Address(ESP, src));
+}
+
+void X86JNIMacroAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
+ bool unpoison_reference) {
+ X86ManagedRegister dest = mdest.AsX86();
+ CHECK(dest.IsCpuRegister() && base.AsX86().IsCpuRegister());
+ __ movl(dest.AsCpuRegister(), Address(base.AsX86().AsCpuRegister(), offs));
+ if (unpoison_reference) {
+ __ MaybeUnpoisonHeapReference(dest.AsCpuRegister());
+ }
+}
+
+void X86JNIMacroAssembler::LoadRawPtr(ManagedRegister mdest,
+ ManagedRegister base,
+ Offset offs) {
+ X86ManagedRegister dest = mdest.AsX86();
+ CHECK(dest.IsCpuRegister() && base.AsX86().IsCpuRegister());
+ __ movl(dest.AsCpuRegister(), Address(base.AsX86().AsCpuRegister(), offs));
+}
+
+void X86JNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset32 offs) {
+ X86ManagedRegister dest = mdest.AsX86();
+ CHECK(dest.IsCpuRegister());
+ __ fs()->movl(dest.AsCpuRegister(), Address::Absolute(offs));
+}
+
+void X86JNIMacroAssembler::SignExtend(ManagedRegister mreg, size_t size) {
+ X86ManagedRegister reg = mreg.AsX86();
+ CHECK(size == 1 || size == 2) << size;
+ CHECK(reg.IsCpuRegister()) << reg;
+ if (size == 1) {
+ __ movsxb(reg.AsCpuRegister(), reg.AsByteRegister());
+ } else {
+ __ movsxw(reg.AsCpuRegister(), reg.AsCpuRegister());
+ }
+}
+
+void X86JNIMacroAssembler::ZeroExtend(ManagedRegister mreg, size_t size) {
+ X86ManagedRegister reg = mreg.AsX86();
+ CHECK(size == 1 || size == 2) << size;
+ CHECK(reg.IsCpuRegister()) << reg;
+ if (size == 1) {
+ __ movzxb(reg.AsCpuRegister(), reg.AsByteRegister());
+ } else {
+ __ movzxw(reg.AsCpuRegister(), reg.AsCpuRegister());
+ }
+}
+
+void X86JNIMacroAssembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) {
+ X86ManagedRegister dest = mdest.AsX86();
+ X86ManagedRegister src = msrc.AsX86();
+ if (!dest.Equals(src)) {
+ if (dest.IsCpuRegister() && src.IsCpuRegister()) {
+ __ movl(dest.AsCpuRegister(), src.AsCpuRegister());
+ } else if (src.IsX87Register() && dest.IsXmmRegister()) {
+ // Pass via stack and pop X87 register
+ __ subl(ESP, Immediate(16));
+ if (size == 4) {
+ CHECK_EQ(src.AsX87Register(), ST0);
+ __ fstps(Address(ESP, 0));
+ __ movss(dest.AsXmmRegister(), Address(ESP, 0));
+ } else {
+ CHECK_EQ(src.AsX87Register(), ST0);
+ __ fstpl(Address(ESP, 0));
+ __ movsd(dest.AsXmmRegister(), Address(ESP, 0));
+ }
+ __ addl(ESP, Immediate(16));
+ } else {
+ // TODO: x87, SSE
+ UNIMPLEMENTED(FATAL) << ": Move " << dest << ", " << src;
+ }
+ }
+}
+
+void X86JNIMacroAssembler::CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) {
+ X86ManagedRegister scratch = mscratch.AsX86();
+ CHECK(scratch.IsCpuRegister());
+ __ movl(scratch.AsCpuRegister(), Address(ESP, src));
+ __ movl(Address(ESP, dest), scratch.AsCpuRegister());
+}
+
+void X86JNIMacroAssembler::CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset32 thr_offs,
+ ManagedRegister mscratch) {
+ X86ManagedRegister scratch = mscratch.AsX86();
+ CHECK(scratch.IsCpuRegister());
+ __ fs()->movl(scratch.AsCpuRegister(), Address::Absolute(thr_offs));
+ Store(fr_offs, scratch, 4);
+}
+
+void X86JNIMacroAssembler::CopyRawPtrToThread(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
+ X86ManagedRegister scratch = mscratch.AsX86();
+ CHECK(scratch.IsCpuRegister());
+ Load(scratch, fr_offs, 4);
+ __ fs()->movl(Address::Absolute(thr_offs), scratch.AsCpuRegister());
+}
+
+void X86JNIMacroAssembler::Copy(FrameOffset dest, FrameOffset src,
+ ManagedRegister mscratch,
+ size_t size) {
+ X86ManagedRegister scratch = mscratch.AsX86();
+ if (scratch.IsCpuRegister() && size == 8) {
+ Load(scratch, src, 4);
+ Store(dest, scratch, 4);
+ Load(scratch, FrameOffset(src.Int32Value() + 4), 4);
+ Store(FrameOffset(dest.Int32Value() + 4), scratch, 4);
+ } else {
+ Load(scratch, src, size);
+ Store(dest, scratch, size);
+ }
+}
+
+void X86JNIMacroAssembler::Copy(FrameOffset /*dst*/,
+ ManagedRegister /*src_base*/,
+ Offset /*src_offset*/,
+ ManagedRegister /*scratch*/,
+ size_t /*size*/) {
+ UNIMPLEMENTED(FATAL);
+}
+
+void X86JNIMacroAssembler::Copy(ManagedRegister dest_base,
+ Offset dest_offset,
+ FrameOffset src,
+ ManagedRegister scratch,
+ size_t size) {
+ CHECK(scratch.IsNoRegister());
+ CHECK_EQ(size, 4u);
+ __ pushl(Address(ESP, src));
+ __ popl(Address(dest_base.AsX86().AsCpuRegister(), dest_offset));
+}
+
+void X86JNIMacroAssembler::Copy(FrameOffset dest,
+ FrameOffset src_base,
+ Offset src_offset,
+ ManagedRegister mscratch,
+ size_t size) {
+ Register scratch = mscratch.AsX86().AsCpuRegister();
+ CHECK_EQ(size, 4u);
+ __ movl(scratch, Address(ESP, src_base));
+ __ movl(scratch, Address(scratch, src_offset));
+ __ movl(Address(ESP, dest), scratch);
+}
+
+void X86JNIMacroAssembler::Copy(ManagedRegister dest,
+ Offset dest_offset,
+ ManagedRegister src,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) {
+ CHECK_EQ(size, 4u);
+ CHECK(scratch.IsNoRegister());
+ __ pushl(Address(src.AsX86().AsCpuRegister(), src_offset));
+ __ popl(Address(dest.AsX86().AsCpuRegister(), dest_offset));
+}
+
+void X86JNIMacroAssembler::Copy(FrameOffset dest,
+ Offset dest_offset,
+ FrameOffset src,
+ Offset src_offset,
+ ManagedRegister mscratch,
+ size_t size) {
+ Register scratch = mscratch.AsX86().AsCpuRegister();
+ CHECK_EQ(size, 4u);
+ CHECK_EQ(dest.Int32Value(), src.Int32Value());
+ __ movl(scratch, Address(ESP, src));
+ __ pushl(Address(scratch, src_offset));
+ __ popl(Address(scratch, dest_offset));
+}
+
+void X86JNIMacroAssembler::MemoryBarrier(ManagedRegister) {
+ __ mfence();
+}
+
+void X86JNIMacroAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
+ FrameOffset handle_scope_offset,
+ ManagedRegister min_reg,
+ bool null_allowed) {
+ X86ManagedRegister out_reg = mout_reg.AsX86();
+ X86ManagedRegister in_reg = min_reg.AsX86();
+ CHECK(in_reg.IsCpuRegister());
+ CHECK(out_reg.IsCpuRegister());
+ VerifyObject(in_reg, null_allowed);
+ if (null_allowed) {
+ Label null_arg;
+ if (!out_reg.Equals(in_reg)) {
+ __ xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
+ }
+ __ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
+ __ j(kZero, &null_arg);
+ __ leal(out_reg.AsCpuRegister(), Address(ESP, handle_scope_offset));
+ __ Bind(&null_arg);
+ } else {
+ __ leal(out_reg.AsCpuRegister(), Address(ESP, handle_scope_offset));
+ }
+}
+
+void X86JNIMacroAssembler::CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handle_scope_offset,
+ ManagedRegister mscratch,
+ bool null_allowed) {
+ X86ManagedRegister scratch = mscratch.AsX86();
+ CHECK(scratch.IsCpuRegister());
+ if (null_allowed) {
+ Label null_arg;
+ __ movl(scratch.AsCpuRegister(), Address(ESP, handle_scope_offset));
+ __ testl(scratch.AsCpuRegister(), scratch.AsCpuRegister());
+ __ j(kZero, &null_arg);
+ __ leal(scratch.AsCpuRegister(), Address(ESP, handle_scope_offset));
+ __ Bind(&null_arg);
+ } else {
+ __ leal(scratch.AsCpuRegister(), Address(ESP, handle_scope_offset));
+ }
+ Store(out_off, scratch, 4);
+}
+
+// Given a handle scope entry, load the associated reference.
+void X86JNIMacroAssembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
+ ManagedRegister min_reg) {
+ X86ManagedRegister out_reg = mout_reg.AsX86();
+ X86ManagedRegister in_reg = min_reg.AsX86();
+ CHECK(out_reg.IsCpuRegister());
+ CHECK(in_reg.IsCpuRegister());
+ Label null_arg;
+ if (!out_reg.Equals(in_reg)) {
+ __ xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
+ }
+ __ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
+ __ j(kZero, &null_arg);
+ __ movl(out_reg.AsCpuRegister(), Address(in_reg.AsCpuRegister(), 0));
+ __ Bind(&null_arg);
+}
+
+void X86JNIMacroAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
+ // TODO: not validating references
+}
+
+void X86JNIMacroAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
+ // TODO: not validating references
+}
+
+void X86JNIMacroAssembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister) {
+ X86ManagedRegister base = mbase.AsX86();
+ CHECK(base.IsCpuRegister());
+ __ call(Address(base.AsCpuRegister(), offset.Int32Value()));
+ // TODO: place reference map on call
+}
+
+void X86JNIMacroAssembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
+ Register scratch = mscratch.AsX86().AsCpuRegister();
+ __ movl(scratch, Address(ESP, base));
+ __ call(Address(scratch, offset));
+}
+
+void X86JNIMacroAssembler::CallFromThread(ThreadOffset32 offset, ManagedRegister /*mscratch*/) {
+ __ fs()->call(Address::Absolute(offset));
+}
+
+void X86JNIMacroAssembler::GetCurrentThread(ManagedRegister tr) {
+ __ fs()->movl(tr.AsX86().AsCpuRegister(),
+ Address::Absolute(Thread::SelfOffset<kX86PointerSize>()));
+}
+
+void X86JNIMacroAssembler::GetCurrentThread(FrameOffset offset,
+ ManagedRegister mscratch) {
+ X86ManagedRegister scratch = mscratch.AsX86();
+ __ fs()->movl(scratch.AsCpuRegister(), Address::Absolute(Thread::SelfOffset<kX86PointerSize>()));
+ __ movl(Address(ESP, offset), scratch.AsCpuRegister());
+}
+
+void X86JNIMacroAssembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) {
+ X86ExceptionSlowPath* slow = new (__ GetArena()) X86ExceptionSlowPath(stack_adjust);
+ __ GetBuffer()->EnqueueSlowPath(slow);
+ __ fs()->cmpl(Address::Absolute(Thread::ExceptionOffset<kX86PointerSize>()), Immediate(0));
+ __ j(kNotEqual, slow->Entry());
+}
+
+#undef __
+
+void X86ExceptionSlowPath::Emit(Assembler *sasm) {
+ X86Assembler* sp_asm = down_cast<X86Assembler*>(sasm);
+#define __ sp_asm->
+ __ Bind(&entry_);
+ // Note: the return value is dead
+ if (stack_adjust_ != 0) { // Fix up the frame.
+ DecreaseFrameSizeImpl(sp_asm, stack_adjust_);
+ }
+ // Pass exception as argument in EAX
+ __ fs()->movl(EAX, Address::Absolute(Thread::ExceptionOffset<kX86PointerSize>()));
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86PointerSize, pDeliverException)));
+  // This call should never return.
+ __ int3();
+#undef __
+}
+
+} // namespace x86
+} // namespace art
diff --git a/compiler/utils/x86/jni_macro_assembler_x86.h b/compiler/utils/x86/jni_macro_assembler_x86.h
new file mode 100644
index 0000000..3f07ede
--- /dev/null
+++ b/compiler/utils/x86/jni_macro_assembler_x86.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_X86_JNI_MACRO_ASSEMBLER_X86_H_
+#define ART_COMPILER_UTILS_X86_JNI_MACRO_ASSEMBLER_X86_H_
+
+#include <vector>
+
+#include "assembler_x86.h"
+#include "base/arena_containers.h"
+#include "base/enums.h"
+#include "base/macros.h"
+#include "offsets.h"
+#include "utils/array_ref.h"
+#include "utils/jni_macro_assembler.h"
+
+namespace art {
+namespace x86 {
+
+class X86JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<X86Assembler, PointerSize::k32> {
+ public:
+ explicit X86JNIMacroAssembler(ArenaAllocator* arena) : JNIMacroAssemblerFwd(arena) {}
+ virtual ~X86JNIMacroAssembler() {}
+
+ //
+ // Overridden common assembler high-level functionality
+ //
+
+ // Emit code that will create an activation on the stack
+ void BuildFrame(size_t frame_size,
+ ManagedRegister method_reg,
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
+
+ // Emit code that will remove an activation from the stack
+ void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs)
+ OVERRIDE;
+
+ void IncreaseFrameSize(size_t adjust) OVERRIDE;
+ void DecreaseFrameSize(size_t adjust) OVERRIDE;
+
+ // Store routines
+ void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
+ void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
+ void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
+
+ void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
+
+ void StoreStackOffsetToThread(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister scratch) OVERRIDE;
+
+ void StoreStackPointerToThread(ThreadOffset32 thr_offs) OVERRIDE;
+
+ void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
+ ManagedRegister scratch) OVERRIDE;
+
+ // Load routines
+ void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
+
+ void LoadFromThread(ManagedRegister dest, ThreadOffset32 src, size_t size) OVERRIDE;
+
+ void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+
+ void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
+ bool unpoison_reference) OVERRIDE;
+
+ void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
+
+ void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset32 offs) OVERRIDE;
+
+ // Copying routines
+ void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
+
+ void CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset32 thr_offs,
+ ManagedRegister scratch) OVERRIDE;
+
+ void CopyRawPtrToThread(ThreadOffset32 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
+ OVERRIDE;
+
+ void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
+
+ void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
+
+ void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, ManagedRegister scratch,
+ size_t size) OVERRIDE;
+
+ void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, ManagedRegister scratch,
+ size_t size) OVERRIDE;
+
+ void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, ManagedRegister scratch,
+ size_t size) OVERRIDE;
+
+ void Copy(ManagedRegister dest, Offset dest_offset, ManagedRegister src, Offset src_offset,
+ ManagedRegister scratch, size_t size) OVERRIDE;
+
+ void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
+ ManagedRegister scratch, size_t size) OVERRIDE;
+
+ void MemoryBarrier(ManagedRegister) OVERRIDE;
+
+ // Sign extension
+ void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+
+ // Zero extension
+ void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+
+ // Exploit fast access in managed code to Thread::Current()
+ void GetCurrentThread(ManagedRegister tr) OVERRIDE;
+ void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
+
+  // Set up out_reg to hold an Object** into the handle scope, or to be null if the
+  // value is null and null_allowed is true. in_reg holds a possibly stale reference
+  // that can be used to avoid loading the handle scope entry to see if the value is
+  // null.
+ void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
+ ManagedRegister in_reg, bool null_allowed) OVERRIDE;
+
+  // Set up out_off to hold an Object** into the handle scope, or to be null if the
+  // value is null and null_allowed is true.
+ void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
+ ManagedRegister scratch, bool null_allowed) OVERRIDE;
+
+  // src holds a handle scope entry (Object**); load the referenced value into dst.
+ void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+
+  // Heap::VerifyObject on src. In some cases (such as a reference to this) we
+  // know that src cannot be null.
+ void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
+ void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
+
+ // Call to address held at [base+offset]
+ void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
+ void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
+ void CallFromThread(ThreadOffset32 offset, ManagedRegister scratch) OVERRIDE;
+
+  // Generate code to check if Thread::Current()->exception_ is non-null
+  // and branch to an ExceptionSlowPath if it is.
+ void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(X86JNIMacroAssembler);
+};
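+
+// Minimal usage sketch (illustrative only; names and values are hypothetical):
+//   X86JNIMacroAssembler jni_asm(&arena);
+//   jni_asm.BuildFrame(frame_size, method_reg, callee_saves, entry_spills);
+//   jni_asm.CreateHandleScopeEntry(out_reg, handle_scope_offset, in_reg, /*null_allowed=*/ true);
+//   jni_asm.Call(base_reg, entrypoint_offset, scratch_reg);
+//   jni_asm.ExceptionPoll(scratch_reg, /*stack_adjust=*/ 0u);
+//   jni_asm.RemoveFrame(frame_size, callee_saves);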
+
+} // namespace x86
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_X86_JNI_MACRO_ASSEMBLER_X86_H_
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 977ce9d..ddc8244 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -2639,547 +2639,6 @@
}
}
-static dwarf::Reg DWARFReg(Register reg) {
- return dwarf::Reg::X86_64Core(static_cast<int>(reg));
-}
-static dwarf::Reg DWARFReg(FloatRegister reg) {
- return dwarf::Reg::X86_64Fp(static_cast<int>(reg));
-}
-
-constexpr size_t kFramePointerSize = 8;
-
-void X86_64Assembler::BuildFrame(size_t frame_size,
- ManagedRegister method_reg,
- ArrayRef<const ManagedRegister> spill_regs,
- const ManagedRegisterEntrySpills& entry_spills) {
- DCHECK_EQ(buffer_.Size(), 0U); // Nothing emitted yet.
- cfi_.SetCurrentCFAOffset(8); // Return address on stack.
- CHECK_ALIGNED(frame_size, kStackAlignment);
- int gpr_count = 0;
- for (int i = spill_regs.size() - 1; i >= 0; --i) {
- x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
- if (spill.IsCpuRegister()) {
- pushq(spill.AsCpuRegister());
- gpr_count++;
- cfi_.AdjustCFAOffset(kFramePointerSize);
- cfi_.RelOffset(DWARFReg(spill.AsCpuRegister().AsRegister()), 0);
- }
- }
- // return address then method on stack.
- int64_t rest_of_frame = static_cast<int64_t>(frame_size)
- - (gpr_count * kFramePointerSize)
- - kFramePointerSize /*return address*/;
- subq(CpuRegister(RSP), Immediate(rest_of_frame));
- cfi_.AdjustCFAOffset(rest_of_frame);
-
- // spill xmms
- int64_t offset = rest_of_frame;
- for (int i = spill_regs.size() - 1; i >= 0; --i) {
- x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
- if (spill.IsXmmRegister()) {
- offset -= sizeof(double);
- movsd(Address(CpuRegister(RSP), offset), spill.AsXmmRegister());
- cfi_.RelOffset(DWARFReg(spill.AsXmmRegister().AsFloatRegister()), offset);
- }
- }
-
- static_assert(static_cast<size_t>(kX86_64PointerSize) == kFramePointerSize,
- "Unexpected frame pointer size.");
-
- movq(Address(CpuRegister(RSP), 0), method_reg.AsX86_64().AsCpuRegister());
-
- for (size_t i = 0; i < entry_spills.size(); ++i) {
- ManagedRegisterSpill spill = entry_spills.at(i);
- if (spill.AsX86_64().IsCpuRegister()) {
- if (spill.getSize() == 8) {
- movq(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()),
- spill.AsX86_64().AsCpuRegister());
- } else {
- CHECK_EQ(spill.getSize(), 4);
- movl(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()), spill.AsX86_64().AsCpuRegister());
- }
- } else {
- if (spill.getSize() == 8) {
- movsd(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()), spill.AsX86_64().AsXmmRegister());
- } else {
- CHECK_EQ(spill.getSize(), 4);
- movss(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()), spill.AsX86_64().AsXmmRegister());
- }
- }
- }
-}
-
-void X86_64Assembler::RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> spill_regs) {
- CHECK_ALIGNED(frame_size, kStackAlignment);
- cfi_.RememberState();
- int gpr_count = 0;
- // unspill xmms
- int64_t offset = static_cast<int64_t>(frame_size) - (spill_regs.size() * kFramePointerSize) - 2 * kFramePointerSize;
- for (size_t i = 0; i < spill_regs.size(); ++i) {
- x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
- if (spill.IsXmmRegister()) {
- offset += sizeof(double);
- movsd(spill.AsXmmRegister(), Address(CpuRegister(RSP), offset));
- cfi_.Restore(DWARFReg(spill.AsXmmRegister().AsFloatRegister()));
- } else {
- gpr_count++;
- }
- }
- int adjust = static_cast<int>(frame_size) - (gpr_count * kFramePointerSize) - kFramePointerSize;
- addq(CpuRegister(RSP), Immediate(adjust));
- cfi_.AdjustCFAOffset(-adjust);
- for (size_t i = 0; i < spill_regs.size(); ++i) {
- x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
- if (spill.IsCpuRegister()) {
- popq(spill.AsCpuRegister());
- cfi_.AdjustCFAOffset(-static_cast<int>(kFramePointerSize));
- cfi_.Restore(DWARFReg(spill.AsCpuRegister().AsRegister()));
- }
- }
- ret();
- // The CFI should be restored for any code that follows the exit block.
- cfi_.RestoreState();
- cfi_.DefCFAOffset(frame_size);
-}
-
-void X86_64Assembler::IncreaseFrameSize(size_t adjust) {
- CHECK_ALIGNED(adjust, kStackAlignment);
- addq(CpuRegister(RSP), Immediate(-static_cast<int64_t>(adjust)));
- cfi_.AdjustCFAOffset(adjust);
-}
-
-void X86_64Assembler::DecreaseFrameSize(size_t adjust) {
- CHECK_ALIGNED(adjust, kStackAlignment);
- addq(CpuRegister(RSP), Immediate(adjust));
- cfi_.AdjustCFAOffset(-adjust);
-}
-
-void X86_64Assembler::Store(FrameOffset offs, ManagedRegister msrc, size_t size) {
- X86_64ManagedRegister src = msrc.AsX86_64();
- if (src.IsNoRegister()) {
- CHECK_EQ(0u, size);
- } else if (src.IsCpuRegister()) {
- if (size == 4) {
- CHECK_EQ(4u, size);
- movl(Address(CpuRegister(RSP), offs), src.AsCpuRegister());
- } else {
- CHECK_EQ(8u, size);
- movq(Address(CpuRegister(RSP), offs), src.AsCpuRegister());
- }
- } else if (src.IsRegisterPair()) {
- CHECK_EQ(0u, size);
- movq(Address(CpuRegister(RSP), offs), src.AsRegisterPairLow());
- movq(Address(CpuRegister(RSP), FrameOffset(offs.Int32Value()+4)),
- src.AsRegisterPairHigh());
- } else if (src.IsX87Register()) {
- if (size == 4) {
- fstps(Address(CpuRegister(RSP), offs));
- } else {
- fstpl(Address(CpuRegister(RSP), offs));
- }
- } else {
- CHECK(src.IsXmmRegister());
- if (size == 4) {
- movss(Address(CpuRegister(RSP), offs), src.AsXmmRegister());
- } else {
- movsd(Address(CpuRegister(RSP), offs), src.AsXmmRegister());
- }
- }
-}
-
-void X86_64Assembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
- X86_64ManagedRegister src = msrc.AsX86_64();
- CHECK(src.IsCpuRegister());
- movl(Address(CpuRegister(RSP), dest), src.AsCpuRegister());
-}
-
-void X86_64Assembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
- X86_64ManagedRegister src = msrc.AsX86_64();
- CHECK(src.IsCpuRegister());
- movq(Address(CpuRegister(RSP), dest), src.AsCpuRegister());
-}
-
-void X86_64Assembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
- ManagedRegister) {
- movl(Address(CpuRegister(RSP), dest), Immediate(imm)); // TODO(64) movq?
-}
-
-void X86_64Assembler::StoreImmediateToThread64(ThreadOffset64 dest, uint32_t imm, ManagedRegister) {
- gs()->movl(Address::Absolute(dest, true), Immediate(imm)); // TODO(64) movq?
-}
-
-void X86_64Assembler::StoreStackOffsetToThread64(ThreadOffset64 thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) {
- X86_64ManagedRegister scratch = mscratch.AsX86_64();
- CHECK(scratch.IsCpuRegister());
- leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), fr_offs));
- gs()->movq(Address::Absolute(thr_offs, true), scratch.AsCpuRegister());
-}
-
-void X86_64Assembler::StoreStackPointerToThread64(ThreadOffset64 thr_offs) {
- gs()->movq(Address::Absolute(thr_offs, true), CpuRegister(RSP));
-}
-
-void X86_64Assembler::StoreSpanning(FrameOffset /*dst*/, ManagedRegister /*src*/,
- FrameOffset /*in_off*/, ManagedRegister /*scratch*/) {
- UNIMPLEMENTED(FATAL); // this case only currently exists for ARM
-}
-
-void X86_64Assembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
- X86_64ManagedRegister dest = mdest.AsX86_64();
- if (dest.IsNoRegister()) {
- CHECK_EQ(0u, size);
- } else if (dest.IsCpuRegister()) {
- if (size == 4) {
- CHECK_EQ(4u, size);
- movl(dest.AsCpuRegister(), Address(CpuRegister(RSP), src));
- } else {
- CHECK_EQ(8u, size);
- movq(dest.AsCpuRegister(), Address(CpuRegister(RSP), src));
- }
- } else if (dest.IsRegisterPair()) {
- CHECK_EQ(0u, size);
- movq(dest.AsRegisterPairLow(), Address(CpuRegister(RSP), src));
- movq(dest.AsRegisterPairHigh(), Address(CpuRegister(RSP), FrameOffset(src.Int32Value()+4)));
- } else if (dest.IsX87Register()) {
- if (size == 4) {
- flds(Address(CpuRegister(RSP), src));
- } else {
- fldl(Address(CpuRegister(RSP), src));
- }
- } else {
- CHECK(dest.IsXmmRegister());
- if (size == 4) {
- movss(dest.AsXmmRegister(), Address(CpuRegister(RSP), src));
- } else {
- movsd(dest.AsXmmRegister(), Address(CpuRegister(RSP), src));
- }
- }
-}
-
-void X86_64Assembler::LoadFromThread64(ManagedRegister mdest, ThreadOffset64 src, size_t size) {
- X86_64ManagedRegister dest = mdest.AsX86_64();
- if (dest.IsNoRegister()) {
- CHECK_EQ(0u, size);
- } else if (dest.IsCpuRegister()) {
- CHECK_EQ(4u, size);
- gs()->movl(dest.AsCpuRegister(), Address::Absolute(src, true));
- } else if (dest.IsRegisterPair()) {
- CHECK_EQ(8u, size);
- gs()->movq(dest.AsRegisterPairLow(), Address::Absolute(src, true));
- } else if (dest.IsX87Register()) {
- if (size == 4) {
- gs()->flds(Address::Absolute(src, true));
- } else {
- gs()->fldl(Address::Absolute(src, true));
- }
- } else {
- CHECK(dest.IsXmmRegister());
- if (size == 4) {
- gs()->movss(dest.AsXmmRegister(), Address::Absolute(src, true));
- } else {
- gs()->movsd(dest.AsXmmRegister(), Address::Absolute(src, true));
- }
- }
-}
-
-void X86_64Assembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
- X86_64ManagedRegister dest = mdest.AsX86_64();
- CHECK(dest.IsCpuRegister());
- movq(dest.AsCpuRegister(), Address(CpuRegister(RSP), src));
-}
-
-void X86_64Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
- bool unpoison_reference) {
- X86_64ManagedRegister dest = mdest.AsX86_64();
- CHECK(dest.IsCpuRegister() && dest.IsCpuRegister());
- movl(dest.AsCpuRegister(), Address(base.AsX86_64().AsCpuRegister(), offs));
- if (unpoison_reference) {
- MaybeUnpoisonHeapReference(dest.AsCpuRegister());
- }
-}
-
-void X86_64Assembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
- Offset offs) {
- X86_64ManagedRegister dest = mdest.AsX86_64();
- CHECK(dest.IsCpuRegister() && dest.IsCpuRegister());
- movq(dest.AsCpuRegister(), Address(base.AsX86_64().AsCpuRegister(), offs));
-}
-
-void X86_64Assembler::LoadRawPtrFromThread64(ManagedRegister mdest, ThreadOffset64 offs) {
- X86_64ManagedRegister dest = mdest.AsX86_64();
- CHECK(dest.IsCpuRegister());
- gs()->movq(dest.AsCpuRegister(), Address::Absolute(offs, true));
-}
-
-void X86_64Assembler::SignExtend(ManagedRegister mreg, size_t size) {
- X86_64ManagedRegister reg = mreg.AsX86_64();
- CHECK(size == 1 || size == 2) << size;
- CHECK(reg.IsCpuRegister()) << reg;
- if (size == 1) {
- movsxb(reg.AsCpuRegister(), reg.AsCpuRegister());
- } else {
- movsxw(reg.AsCpuRegister(), reg.AsCpuRegister());
- }
-}
-
-void X86_64Assembler::ZeroExtend(ManagedRegister mreg, size_t size) {
- X86_64ManagedRegister reg = mreg.AsX86_64();
- CHECK(size == 1 || size == 2) << size;
- CHECK(reg.IsCpuRegister()) << reg;
- if (size == 1) {
- movzxb(reg.AsCpuRegister(), reg.AsCpuRegister());
- } else {
- movzxw(reg.AsCpuRegister(), reg.AsCpuRegister());
- }
-}
-
-void X86_64Assembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) {
- X86_64ManagedRegister dest = mdest.AsX86_64();
- X86_64ManagedRegister src = msrc.AsX86_64();
- if (!dest.Equals(src)) {
- if (dest.IsCpuRegister() && src.IsCpuRegister()) {
- movq(dest.AsCpuRegister(), src.AsCpuRegister());
- } else if (src.IsX87Register() && dest.IsXmmRegister()) {
- // Pass via stack and pop X87 register
- subl(CpuRegister(RSP), Immediate(16));
- if (size == 4) {
- CHECK_EQ(src.AsX87Register(), ST0);
- fstps(Address(CpuRegister(RSP), 0));
- movss(dest.AsXmmRegister(), Address(CpuRegister(RSP), 0));
- } else {
- CHECK_EQ(src.AsX87Register(), ST0);
- fstpl(Address(CpuRegister(RSP), 0));
- movsd(dest.AsXmmRegister(), Address(CpuRegister(RSP), 0));
- }
- addq(CpuRegister(RSP), Immediate(16));
- } else {
- // TODO: x87, SSE
- UNIMPLEMENTED(FATAL) << ": Move " << dest << ", " << src;
- }
- }
-}
-
-void X86_64Assembler::CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) {
- X86_64ManagedRegister scratch = mscratch.AsX86_64();
- CHECK(scratch.IsCpuRegister());
- movl(scratch.AsCpuRegister(), Address(CpuRegister(RSP), src));
- movl(Address(CpuRegister(RSP), dest), scratch.AsCpuRegister());
-}
-
-void X86_64Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs,
- ThreadOffset64 thr_offs,
- ManagedRegister mscratch) {
- X86_64ManagedRegister scratch = mscratch.AsX86_64();
- CHECK(scratch.IsCpuRegister());
- gs()->movq(scratch.AsCpuRegister(), Address::Absolute(thr_offs, true));
- Store(fr_offs, scratch, 8);
-}
-
-void X86_64Assembler::CopyRawPtrToThread64(ThreadOffset64 thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) {
- X86_64ManagedRegister scratch = mscratch.AsX86_64();
- CHECK(scratch.IsCpuRegister());
- Load(scratch, fr_offs, 8);
- gs()->movq(Address::Absolute(thr_offs, true), scratch.AsCpuRegister());
-}
-
-void X86_64Assembler::Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch,
- size_t size) {
- X86_64ManagedRegister scratch = mscratch.AsX86_64();
- if (scratch.IsCpuRegister() && size == 8) {
- Load(scratch, src, 4);
- Store(dest, scratch, 4);
- Load(scratch, FrameOffset(src.Int32Value() + 4), 4);
- Store(FrameOffset(dest.Int32Value() + 4), scratch, 4);
- } else {
- Load(scratch, src, size);
- Store(dest, scratch, size);
- }
-}
-
-void X86_64Assembler::Copy(FrameOffset /*dst*/, ManagedRegister /*src_base*/, Offset /*src_offset*/,
- ManagedRegister /*scratch*/, size_t /*size*/) {
- UNIMPLEMENTED(FATAL);
-}
-
-void X86_64Assembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
- ManagedRegister scratch, size_t size) {
- CHECK(scratch.IsNoRegister());
- CHECK_EQ(size, 4u);
- pushq(Address(CpuRegister(RSP), src));
- popq(Address(dest_base.AsX86_64().AsCpuRegister(), dest_offset));
-}
-
-void X86_64Assembler::Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset,
- ManagedRegister mscratch, size_t size) {
- CpuRegister scratch = mscratch.AsX86_64().AsCpuRegister();
- CHECK_EQ(size, 4u);
- movq(scratch, Address(CpuRegister(RSP), src_base));
- movq(scratch, Address(scratch, src_offset));
- movq(Address(CpuRegister(RSP), dest), scratch);
-}
-
-void X86_64Assembler::Copy(ManagedRegister dest, Offset dest_offset,
- ManagedRegister src, Offset src_offset,
- ManagedRegister scratch, size_t size) {
- CHECK_EQ(size, 4u);
- CHECK(scratch.IsNoRegister());
- pushq(Address(src.AsX86_64().AsCpuRegister(), src_offset));
- popq(Address(dest.AsX86_64().AsCpuRegister(), dest_offset));
-}
-
-void X86_64Assembler::Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
- ManagedRegister mscratch, size_t size) {
- CpuRegister scratch = mscratch.AsX86_64().AsCpuRegister();
- CHECK_EQ(size, 4u);
- CHECK_EQ(dest.Int32Value(), src.Int32Value());
- movq(scratch, Address(CpuRegister(RSP), src));
- pushq(Address(scratch, src_offset));
- popq(Address(scratch, dest_offset));
-}
-
-void X86_64Assembler::MemoryBarrier(ManagedRegister) {
- mfence();
-}
-
-void X86_64Assembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
- FrameOffset handle_scope_offset,
- ManagedRegister min_reg, bool null_allowed) {
- X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
- X86_64ManagedRegister in_reg = min_reg.AsX86_64();
- if (in_reg.IsNoRegister()) { // TODO(64): && null_allowed
- // Use out_reg as indicator of null.
- in_reg = out_reg;
- // TODO: movzwl
- movl(in_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
- }
- CHECK(in_reg.IsCpuRegister());
- CHECK(out_reg.IsCpuRegister());
- VerifyObject(in_reg, null_allowed);
- if (null_allowed) {
- Label null_arg;
- if (!out_reg.Equals(in_reg)) {
- xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
- }
- testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
- j(kZero, &null_arg);
- leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
- Bind(&null_arg);
- } else {
- leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
- }
-}
-
-void X86_64Assembler::CreateHandleScopeEntry(FrameOffset out_off,
- FrameOffset handle_scope_offset,
- ManagedRegister mscratch,
- bool null_allowed) {
- X86_64ManagedRegister scratch = mscratch.AsX86_64();
- CHECK(scratch.IsCpuRegister());
- if (null_allowed) {
- Label null_arg;
- movl(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
- testl(scratch.AsCpuRegister(), scratch.AsCpuRegister());
- j(kZero, &null_arg);
- leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
- Bind(&null_arg);
- } else {
- leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
- }
- Store(out_off, scratch, 8);
-}
-
-// Given a handle scope entry, load the associated reference.
-void X86_64Assembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
- ManagedRegister min_reg) {
- X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
- X86_64ManagedRegister in_reg = min_reg.AsX86_64();
- CHECK(out_reg.IsCpuRegister());
- CHECK(in_reg.IsCpuRegister());
- Label null_arg;
- if (!out_reg.Equals(in_reg)) {
- xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
- }
- testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
- j(kZero, &null_arg);
- movq(out_reg.AsCpuRegister(), Address(in_reg.AsCpuRegister(), 0));
- Bind(&null_arg);
-}
-
-void X86_64Assembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
- // TODO: not validating references
-}
-
-void X86_64Assembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
- // TODO: not validating references
-}
-
-void X86_64Assembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister) {
- X86_64ManagedRegister base = mbase.AsX86_64();
- CHECK(base.IsCpuRegister());
- call(Address(base.AsCpuRegister(), offset.Int32Value()));
- // TODO: place reference map on call
-}
-
-void X86_64Assembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
- CpuRegister scratch = mscratch.AsX86_64().AsCpuRegister();
- movq(scratch, Address(CpuRegister(RSP), base));
- call(Address(scratch, offset));
-}
-
-void X86_64Assembler::CallFromThread64(ThreadOffset64 offset, ManagedRegister /*mscratch*/) {
- gs()->call(Address::Absolute(offset, true));
-}
-
-void X86_64Assembler::GetCurrentThread(ManagedRegister tr) {
- gs()->movq(tr.AsX86_64().AsCpuRegister(),
- Address::Absolute(Thread::SelfOffset<kX86_64PointerSize>(), true));
-}
-
-void X86_64Assembler::GetCurrentThread(FrameOffset offset, ManagedRegister mscratch) {
- X86_64ManagedRegister scratch = mscratch.AsX86_64();
- gs()->movq(scratch.AsCpuRegister(),
- Address::Absolute(Thread::SelfOffset<kX86_64PointerSize>(), true));
- movq(Address(CpuRegister(RSP), offset), scratch.AsCpuRegister());
-}
-
-// Slowpath entered when Thread::Current()->_exception is non-null
-class X86_64ExceptionSlowPath FINAL : public SlowPath {
- public:
- explicit X86_64ExceptionSlowPath(size_t stack_adjust) : stack_adjust_(stack_adjust) {}
- virtual void Emit(Assembler *sp_asm) OVERRIDE;
- private:
- const size_t stack_adjust_;
-};
-
-void X86_64Assembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) {
- X86_64ExceptionSlowPath* slow = new (GetArena()) X86_64ExceptionSlowPath(stack_adjust);
- buffer_.EnqueueSlowPath(slow);
- gs()->cmpl(Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>(), true), Immediate(0));
- j(kNotEqual, slow->Entry());
-}
-
-void X86_64ExceptionSlowPath::Emit(Assembler *sasm) {
- X86_64Assembler* sp_asm = down_cast<X86_64Assembler*>(sasm);
-#define __ sp_asm->
- __ Bind(&entry_);
- // Note: the return value is dead
- if (stack_adjust_ != 0) { // Fix up the frame.
- __ DecreaseFrameSize(stack_adjust_);
- }
- // Pass exception as argument in RDI
- __ gs()->movq(CpuRegister(RDI),
- Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>(), true));
- __ gs()->call(
- Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64PointerSize, pDeliverException), true));
- // this call should never return
- __ int3();
-#undef __
-}
-
void X86_64Assembler::AddConstantArea() {
ArrayRef<const int32_t> area = constant_area_.GetBuffer();
for (size_t i = 0, e = area.size(); i < e; i++) {
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 52e39cf..370f49c 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -28,6 +28,7 @@
#include "offsets.h"
#include "utils/array_ref.h"
#include "utils/assembler.h"
+#include "utils/jni_macro_assembler.h"
namespace art {
namespace x86_64 {
@@ -699,125 +700,6 @@
}
void Bind(NearLabel* label);
- //
- // Overridden common assembler high-level functionality
- //
-
- // Emit code that will create an activation on the stack
- void BuildFrame(size_t frame_size,
- ManagedRegister method_reg,
- ArrayRef<const ManagedRegister> callee_save_regs,
- const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
-
- // Emit code that will remove an activation from the stack
- void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs)
- OVERRIDE;
-
- void IncreaseFrameSize(size_t adjust) OVERRIDE;
- void DecreaseFrameSize(size_t adjust) OVERRIDE;
-
- // Store routines
- void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
- void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
- void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
-
- void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
-
- void StoreImmediateToThread64(ThreadOffset64 dest, uint32_t imm, ManagedRegister scratch)
- OVERRIDE;
-
- void StoreStackOffsetToThread64(ThreadOffset64 thr_offs, FrameOffset fr_offs,
- ManagedRegister scratch) OVERRIDE;
-
- void StoreStackPointerToThread64(ThreadOffset64 thr_offs) OVERRIDE;
-
- void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
- ManagedRegister scratch) OVERRIDE;
-
- // Load routines
- void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
-
- void LoadFromThread64(ManagedRegister dest, ThreadOffset64 src, size_t size) OVERRIDE;
-
- void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
-
- void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
- bool unpoison_reference) OVERRIDE;
-
- void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
-
- void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset64 offs) OVERRIDE;
-
- // Copying routines
- void Move(ManagedRegister dest, ManagedRegister src, size_t size);
-
- void CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset64 thr_offs,
- ManagedRegister scratch) OVERRIDE;
-
- void CopyRawPtrToThread64(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
- OVERRIDE;
-
- void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
-
- void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
-
- void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, ManagedRegister scratch,
- size_t size) OVERRIDE;
-
- void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, ManagedRegister scratch,
- size_t size) OVERRIDE;
-
- void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, ManagedRegister scratch,
- size_t size) OVERRIDE;
-
- void Copy(ManagedRegister dest, Offset dest_offset, ManagedRegister src, Offset src_offset,
- ManagedRegister scratch, size_t size) OVERRIDE;
-
- void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
- ManagedRegister scratch, size_t size) OVERRIDE;
-
- void MemoryBarrier(ManagedRegister) OVERRIDE;
-
- // Sign extension
- void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
-
- // Zero extension
- void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
-
- // Exploit fast access in managed code to Thread::Current()
- void GetCurrentThread(ManagedRegister tr) OVERRIDE;
- void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
-
- // Set up out_reg to hold a Object** into the handle scope, or to be null if the
- // value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the handle scope entry to see if the value is
- // null.
- void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
- ManagedRegister in_reg, bool null_allowed) OVERRIDE;
-
- // Set up out_off to hold a Object** into the handle scope, or to be null if the
- // value is null and null_allowed.
- void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
- ManagedRegister scratch, bool null_allowed) OVERRIDE;
-
- // src holds a handle scope entry (Object**) load this into dst
- virtual void LoadReferenceFromHandleScope(ManagedRegister dst,
- ManagedRegister src);
-
- // Heap::VerifyObject on src. In some cases (such as a reference to this) we
- // know that src may not be null.
- void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
- void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
-
- // Call to address held at [base+offset]
- void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void CallFromThread64(ThreadOffset64 offset, ManagedRegister scratch) OVERRIDE;
-
- // Generate code to check if Thread::Current()->exception_ is non-null
- // and branch to a ExceptionSlowPath if it is.
- void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
-
// Add a double to the constant area, returning the offset into
// the constant area where the literal resides.
size_t AddDouble(double v) { return constant_area_.AddDouble(v); }
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index 788c725..36c966b 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -22,7 +22,9 @@
#include "base/bit_utils.h"
#include "base/stl_util.h"
+#include "jni_macro_assembler_x86_64.h"
#include "utils/assembler_test.h"
+#include "utils/jni_macro_assembler_test.h"
namespace art {
@@ -1485,6 +1487,62 @@
DriverFn(&setcc_test_fn, "setcc");
}
+TEST_F(AssemblerX86_64Test, MovzxbRegs) {
+ DriverStr(Repeatrb(&x86_64::X86_64Assembler::movzxb, "movzbl %{reg2}, %{reg1}"), "movzxb");
+}
+
+TEST_F(AssemblerX86_64Test, MovsxbRegs) {
+ DriverStr(Repeatrb(&x86_64::X86_64Assembler::movsxb, "movsbl %{reg2}, %{reg1}"), "movsxb");
+}
+
+TEST_F(AssemblerX86_64Test, Repnescasw) {
+ GetAssembler()->repne_scasw();
+ const char* expected = "repne scasw\n";
+ DriverStr(expected, "Repnescasw");
+}
+
+TEST_F(AssemblerX86_64Test, Repecmpsw) {
+ GetAssembler()->repe_cmpsw();
+ const char* expected = "repe cmpsw\n";
+ DriverStr(expected, "Repecmpsw");
+}
+
+TEST_F(AssemblerX86_64Test, Repecmpsl) {
+ GetAssembler()->repe_cmpsl();
+ const char* expected = "repe cmpsl\n";
+ DriverStr(expected, "Repecmpsl");
+}
+
+TEST_F(AssemblerX86_64Test, Repecmpsq) {
+ GetAssembler()->repe_cmpsq();
+ const char* expected = "repe cmpsq\n";
+ DriverStr(expected, "Repecmpsq");
+}
+
+TEST_F(AssemblerX86_64Test, Cmpb) {
+ GetAssembler()->cmpb(x86_64::Address(x86_64::CpuRegister(x86_64::RDI), 128),
+ x86_64::Immediate(0));
+ const char* expected = "cmpb $0, 128(%RDI)\n";
+ DriverStr(expected, "cmpb");
+}
+
+class JNIMacroAssemblerX86_64Test : public JNIMacroAssemblerTest<x86_64::X86_64JNIMacroAssembler> {
+ public:
+ using Base = JNIMacroAssemblerTest<x86_64::X86_64JNIMacroAssembler>;
+
+ protected:
+ // Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
+ std::string GetArchitectureString() OVERRIDE {
+ return "x86_64";
+ }
+
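+  // Flags handed to the disassembler (objdump): treat the emitted buffer as a
+  // raw binary (-b binary) and decode it as 64-bit x86 (-m i386:x86-64).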
+ std::string GetDisassembleParameters() OVERRIDE {
+ return " -D -bbinary -mi386:x86-64 -Mx86-64,addr64,data32 --no-show-raw-insn";
+ }
+};
+
static x86_64::X86_64ManagedRegister ManagedFromCpu(x86_64::Register r) {
return x86_64::X86_64ManagedRegister::FromCpuRegister(r);
}
@@ -1493,8 +1551,8 @@
return x86_64::X86_64ManagedRegister::FromXmmRegister(r);
}
-std::string buildframe_test_fn(AssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
- x86_64::X86_64Assembler* assembler) {
+std::string buildframe_test_fn(JNIMacroAssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
+ x86_64::X86_64JNIMacroAssembler* assembler) {
// TODO: more interesting spill registers / entry spills.
// Two random spill regs.
@@ -1536,12 +1594,12 @@
return str.str();
}
-TEST_F(AssemblerX86_64Test, BuildFrame) {
+TEST_F(JNIMacroAssemblerX86_64Test, BuildFrame) {
DriverFn(&buildframe_test_fn, "BuildFrame");
}
-std::string removeframe_test_fn(AssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
- x86_64::X86_64Assembler* assembler) {
+std::string removeframe_test_fn(JNIMacroAssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
+ x86_64::X86_64JNIMacroAssembler* assembler) {
// TODO: more interesting spill registers / entry spills.
// Two random spill regs.
@@ -1567,12 +1625,13 @@
return str.str();
}
-TEST_F(AssemblerX86_64Test, RemoveFrame) {
+TEST_F(JNIMacroAssemblerX86_64Test, RemoveFrame) {
DriverFn(&removeframe_test_fn, "RemoveFrame");
}
-std::string increaseframe_test_fn(AssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
- x86_64::X86_64Assembler* assembler) {
+std::string increaseframe_test_fn(
+ JNIMacroAssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
+ x86_64::X86_64JNIMacroAssembler* assembler) {
assembler->IncreaseFrameSize(0U);
assembler->IncreaseFrameSize(kStackAlignment);
assembler->IncreaseFrameSize(10 * kStackAlignment);
@@ -1586,12 +1645,13 @@
return str.str();
}
-TEST_F(AssemblerX86_64Test, IncreaseFrame) {
+TEST_F(JNIMacroAssemblerX86_64Test, IncreaseFrame) {
DriverFn(&increaseframe_test_fn, "IncreaseFrame");
}
-std::string decreaseframe_test_fn(AssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
- x86_64::X86_64Assembler* assembler) {
+std::string decreaseframe_test_fn(
+ JNIMacroAssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
+ x86_64::X86_64JNIMacroAssembler* assembler) {
assembler->DecreaseFrameSize(0U);
assembler->DecreaseFrameSize(kStackAlignment);
assembler->DecreaseFrameSize(10 * kStackAlignment);
@@ -1605,47 +1665,8 @@
return str.str();
}
-TEST_F(AssemblerX86_64Test, DecreaseFrame) {
+TEST_F(JNIMacroAssemblerX86_64Test, DecreaseFrame) {
DriverFn(&decreaseframe_test_fn, "DecreaseFrame");
}
-TEST_F(AssemblerX86_64Test, MovzxbRegs) {
- DriverStr(Repeatrb(&x86_64::X86_64Assembler::movzxb, "movzbl %{reg2}, %{reg1}"), "movzxb");
-}
-
-TEST_F(AssemblerX86_64Test, MovsxbRegs) {
- DriverStr(Repeatrb(&x86_64::X86_64Assembler::movsxb, "movsbl %{reg2}, %{reg1}"), "movsxb");
-}
-
-TEST_F(AssemblerX86_64Test, Repnescasw) {
- GetAssembler()->repne_scasw();
- const char* expected = "repne scasw\n";
- DriverStr(expected, "Repnescasw");
-}
-
-TEST_F(AssemblerX86_64Test, Repecmpsw) {
- GetAssembler()->repe_cmpsw();
- const char* expected = "repe cmpsw\n";
- DriverStr(expected, "Repecmpsw");
-}
-
-TEST_F(AssemblerX86_64Test, Repecmpsl) {
- GetAssembler()->repe_cmpsl();
- const char* expected = "repe cmpsl\n";
- DriverStr(expected, "Repecmpsl");
-}
-
-TEST_F(AssemblerX86_64Test, Repecmpsq) {
- GetAssembler()->repe_cmpsq();
- const char* expected = "repe cmpsq\n";
- DriverStr(expected, "Repecmpsq");
-}
-
-TEST_F(AssemblerX86_64Test, Cmpb) {
- GetAssembler()->cmpb(x86_64::Address(x86_64::CpuRegister(x86_64::RDI), 128),
- x86_64::Immediate(0));
- const char* expected = "cmpb $0, 128(%RDI)\n";
- DriverStr(expected, "cmpb");
-}
-
} // namespace art
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
new file mode 100644
index 0000000..47fb59b
--- /dev/null
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
@@ -0,0 +1,603 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni_macro_assembler_x86_64.h"
+
+#include "base/casts.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "memory_region.h"
+#include "thread.h"
+
+namespace art {
+namespace x86_64 {
+
+static dwarf::Reg DWARFReg(Register reg) {
+ return dwarf::Reg::X86_64Core(static_cast<int>(reg));
+}
+static dwarf::Reg DWARFReg(FloatRegister reg) {
+ return dwarf::Reg::X86_64Fp(static_cast<int>(reg));
+}
+
+constexpr size_t kFramePointerSize = 8;
+
+#define __ asm_.
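+
+// Within this file, '__' forwards each emitted instruction to the wrapped
+// X86_64Assembler instance (asm_), keeping the code close to assembly syntax.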
+
+void X86_64JNIMacroAssembler::BuildFrame(size_t frame_size,
+ ManagedRegister method_reg,
+ ArrayRef<const ManagedRegister> spill_regs,
+ const ManagedRegisterEntrySpills& entry_spills) {
+ DCHECK_EQ(CodeSize(), 0U); // Nothing emitted yet.
+ cfi().SetCurrentCFAOffset(8); // Return address on stack.
+ CHECK_ALIGNED(frame_size, kStackAlignment);
+ int gpr_count = 0;
+ for (int i = spill_regs.size() - 1; i >= 0; --i) {
+ x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
+ if (spill.IsCpuRegister()) {
+ __ pushq(spill.AsCpuRegister());
+ gpr_count++;
+ cfi().AdjustCFAOffset(kFramePointerSize);
+ cfi().RelOffset(DWARFReg(spill.AsCpuRegister().AsRegister()), 0);
+ }
+ }
+  // Reserve space for the rest of the frame (the return address and the
+  // callee-save GPRs pushed above have already been accounted for).
+ int64_t rest_of_frame = static_cast<int64_t>(frame_size)
+ - (gpr_count * kFramePointerSize)
+ - kFramePointerSize /*return address*/;
+ __ subq(CpuRegister(RSP), Immediate(rest_of_frame));
+ cfi().AdjustCFAOffset(rest_of_frame);
+
+ // spill xmms
+ int64_t offset = rest_of_frame;
+ for (int i = spill_regs.size() - 1; i >= 0; --i) {
+ x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
+ if (spill.IsXmmRegister()) {
+ offset -= sizeof(double);
+ __ movsd(Address(CpuRegister(RSP), offset), spill.AsXmmRegister());
+ cfi().RelOffset(DWARFReg(spill.AsXmmRegister().AsFloatRegister()), offset);
+ }
+ }
+
+ static_assert(static_cast<size_t>(kX86_64PointerSize) == kFramePointerSize,
+ "Unexpected frame pointer size.");
+
+ __ movq(Address(CpuRegister(RSP), 0), method_reg.AsX86_64().AsCpuRegister());
+
+ for (size_t i = 0; i < entry_spills.size(); ++i) {
+ ManagedRegisterSpill spill = entry_spills.at(i);
+ if (spill.AsX86_64().IsCpuRegister()) {
+ if (spill.getSize() == 8) {
+ __ movq(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()),
+ spill.AsX86_64().AsCpuRegister());
+ } else {
+ CHECK_EQ(spill.getSize(), 4);
+ __ movl(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()),
+ spill.AsX86_64().AsCpuRegister());
+ }
+ } else {
+ if (spill.getSize() == 8) {
+ __ movsd(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()),
+ spill.AsX86_64().AsXmmRegister());
+ } else {
+ CHECK_EQ(spill.getSize(), 4);
+ __ movss(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()),
+ spill.AsX86_64().AsXmmRegister());
+ }
+ }
+ }
+}
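+
+// Resulting frame layout, high to low addresses (sketch; exact offsets depend
+// on the spill sets):
+//   [RSP + frame_size + n]  entry spills (stored into the caller's frame)
+//   return address
+//   callee-save GPRs        (pushed in reverse spill order)
+//   callee-save XMMs
+//   ...
+//   [RSP + 0]               ArtMethod* (method_reg)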
+
+void X86_64JNIMacroAssembler::RemoveFrame(size_t frame_size,
+ ArrayRef<const ManagedRegister> spill_regs) {
+ CHECK_ALIGNED(frame_size, kStackAlignment);
+ cfi().RememberState();
+ int gpr_count = 0;
+ // unspill xmms
+ int64_t offset = static_cast<int64_t>(frame_size)
+ - (spill_regs.size() * kFramePointerSize)
+ - 2 * kFramePointerSize;
+ for (size_t i = 0; i < spill_regs.size(); ++i) {
+ x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
+ if (spill.IsXmmRegister()) {
+ offset += sizeof(double);
+ __ movsd(spill.AsXmmRegister(), Address(CpuRegister(RSP), offset));
+ cfi().Restore(DWARFReg(spill.AsXmmRegister().AsFloatRegister()));
+ } else {
+ gpr_count++;
+ }
+ }
+ int adjust = static_cast<int>(frame_size) - (gpr_count * kFramePointerSize) - kFramePointerSize;
+ __ addq(CpuRegister(RSP), Immediate(adjust));
+ cfi().AdjustCFAOffset(-adjust);
+ for (size_t i = 0; i < spill_regs.size(); ++i) {
+ x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
+ if (spill.IsCpuRegister()) {
+ __ popq(spill.AsCpuRegister());
+ cfi().AdjustCFAOffset(-static_cast<int>(kFramePointerSize));
+ cfi().Restore(DWARFReg(spill.AsCpuRegister().AsRegister()));
+ }
+ }
+ __ ret();
+ // The CFI should be restored for any code that follows the exit block.
+ cfi().RestoreState();
+ cfi().DefCFAOffset(frame_size);
+}
+
+void X86_64JNIMacroAssembler::IncreaseFrameSize(size_t adjust) {
+ CHECK_ALIGNED(adjust, kStackAlignment);
+ __ addq(CpuRegister(RSP), Immediate(-static_cast<int64_t>(adjust)));
+ cfi().AdjustCFAOffset(adjust);
+}
+
+static void DecreaseFrameSizeImpl(size_t adjust, X86_64Assembler* assembler) {
+ CHECK_ALIGNED(adjust, kStackAlignment);
+ assembler->addq(CpuRegister(RSP), Immediate(adjust));
+ assembler->cfi().AdjustCFAOffset(-adjust);
+}
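+
+// DecreaseFrameSizeImpl is a free function so X86_64ExceptionSlowPath::Emit
+// (below) can reuse it with a bare X86_64Assembler.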
+
+void X86_64JNIMacroAssembler::DecreaseFrameSize(size_t adjust) {
+ DecreaseFrameSizeImpl(adjust, &asm_);
+}
+
+void X86_64JNIMacroAssembler::Store(FrameOffset offs, ManagedRegister msrc, size_t size) {
+ X86_64ManagedRegister src = msrc.AsX86_64();
+ if (src.IsNoRegister()) {
+ CHECK_EQ(0u, size);
+ } else if (src.IsCpuRegister()) {
+ if (size == 4) {
+ CHECK_EQ(4u, size);
+ __ movl(Address(CpuRegister(RSP), offs), src.AsCpuRegister());
+ } else {
+ CHECK_EQ(8u, size);
+ __ movq(Address(CpuRegister(RSP), offs), src.AsCpuRegister());
+ }
+ } else if (src.IsRegisterPair()) {
+ CHECK_EQ(0u, size);
+ __ movq(Address(CpuRegister(RSP), offs), src.AsRegisterPairLow());
+ __ movq(Address(CpuRegister(RSP), FrameOffset(offs.Int32Value()+4)),
+ src.AsRegisterPairHigh());
+ } else if (src.IsX87Register()) {
+ if (size == 4) {
+ __ fstps(Address(CpuRegister(RSP), offs));
+ } else {
+ __ fstpl(Address(CpuRegister(RSP), offs));
+ }
+ } else {
+ CHECK(src.IsXmmRegister());
+ if (size == 4) {
+ __ movss(Address(CpuRegister(RSP), offs), src.AsXmmRegister());
+ } else {
+ __ movsd(Address(CpuRegister(RSP), offs), src.AsXmmRegister());
+ }
+ }
+}
+
+void X86_64JNIMacroAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
+ X86_64ManagedRegister src = msrc.AsX86_64();
+ CHECK(src.IsCpuRegister());
+ __ movl(Address(CpuRegister(RSP), dest), src.AsCpuRegister());
+}
+
+void X86_64JNIMacroAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
+ X86_64ManagedRegister src = msrc.AsX86_64();
+ CHECK(src.IsCpuRegister());
+ __ movq(Address(CpuRegister(RSP), dest), src.AsCpuRegister());
+}
+
+void X86_64JNIMacroAssembler::StoreImmediateToFrame(FrameOffset dest,
+ uint32_t imm,
+ ManagedRegister) {
+ __ movl(Address(CpuRegister(RSP), dest), Immediate(imm)); // TODO(64) movq?
+}
+
+void X86_64JNIMacroAssembler::StoreStackOffsetToThread(ThreadOffset64 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
+ X86_64ManagedRegister scratch = mscratch.AsX86_64();
+ CHECK(scratch.IsCpuRegister());
+ __ leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), fr_offs));
+ __ gs()->movq(Address::Absolute(thr_offs, true), scratch.AsCpuRegister());
+}
+
+void X86_64JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset64 thr_offs) {
+ __ gs()->movq(Address::Absolute(thr_offs, true), CpuRegister(RSP));
+}
+
+void X86_64JNIMacroAssembler::StoreSpanning(FrameOffset /*dst*/,
+ ManagedRegister /*src*/,
+ FrameOffset /*in_off*/,
+ ManagedRegister /*scratch*/) {
+  UNIMPLEMENTED(FATAL);  // This case currently only exists for ARM.
+}
+
+void X86_64JNIMacroAssembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
+ X86_64ManagedRegister dest = mdest.AsX86_64();
+ if (dest.IsNoRegister()) {
+ CHECK_EQ(0u, size);
+ } else if (dest.IsCpuRegister()) {
+ if (size == 4) {
+ CHECK_EQ(4u, size);
+ __ movl(dest.AsCpuRegister(), Address(CpuRegister(RSP), src));
+ } else {
+ CHECK_EQ(8u, size);
+ __ movq(dest.AsCpuRegister(), Address(CpuRegister(RSP), src));
+ }
+ } else if (dest.IsRegisterPair()) {
+ CHECK_EQ(0u, size);
+ __ movq(dest.AsRegisterPairLow(), Address(CpuRegister(RSP), src));
+ __ movq(dest.AsRegisterPairHigh(), Address(CpuRegister(RSP), FrameOffset(src.Int32Value()+4)));
+ } else if (dest.IsX87Register()) {
+ if (size == 4) {
+ __ flds(Address(CpuRegister(RSP), src));
+ } else {
+ __ fldl(Address(CpuRegister(RSP), src));
+ }
+ } else {
+ CHECK(dest.IsXmmRegister());
+ if (size == 4) {
+ __ movss(dest.AsXmmRegister(), Address(CpuRegister(RSP), src));
+ } else {
+ __ movsd(dest.AsXmmRegister(), Address(CpuRegister(RSP), src));
+ }
+ }
+}
+
+void X86_64JNIMacroAssembler::LoadFromThread(ManagedRegister mdest,
+ ThreadOffset64 src, size_t size) {
+ X86_64ManagedRegister dest = mdest.AsX86_64();
+ if (dest.IsNoRegister()) {
+ CHECK_EQ(0u, size);
+ } else if (dest.IsCpuRegister()) {
+ CHECK_EQ(4u, size);
+ __ gs()->movl(dest.AsCpuRegister(), Address::Absolute(src, true));
+ } else if (dest.IsRegisterPair()) {
+ CHECK_EQ(8u, size);
+ __ gs()->movq(dest.AsRegisterPairLow(), Address::Absolute(src, true));
+ } else if (dest.IsX87Register()) {
+ if (size == 4) {
+ __ gs()->flds(Address::Absolute(src, true));
+ } else {
+ __ gs()->fldl(Address::Absolute(src, true));
+ }
+ } else {
+ CHECK(dest.IsXmmRegister());
+ if (size == 4) {
+ __ gs()->movss(dest.AsXmmRegister(), Address::Absolute(src, true));
+ } else {
+ __ gs()->movsd(dest.AsXmmRegister(), Address::Absolute(src, true));
+ }
+ }
+}
+
+void X86_64JNIMacroAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
+ X86_64ManagedRegister dest = mdest.AsX86_64();
+ CHECK(dest.IsCpuRegister());
+ __ movq(dest.AsCpuRegister(), Address(CpuRegister(RSP), src));
+}
+
+void X86_64JNIMacroAssembler::LoadRef(ManagedRegister mdest,
+ ManagedRegister base,
+ MemberOffset offs,
+ bool unpoison_reference) {
+ X86_64ManagedRegister dest = mdest.AsX86_64();
+  CHECK(dest.IsCpuRegister() && base.AsX86_64().IsCpuRegister());
+ __ movl(dest.AsCpuRegister(), Address(base.AsX86_64().AsCpuRegister(), offs));
+ if (unpoison_reference) {
+ __ MaybeUnpoisonHeapReference(dest.AsCpuRegister());
+ }
+}
+
+void X86_64JNIMacroAssembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) {
+ X86_64ManagedRegister dest = mdest.AsX86_64();
+  CHECK(dest.IsCpuRegister() && base.AsX86_64().IsCpuRegister());
+ __ movq(dest.AsCpuRegister(), Address(base.AsX86_64().AsCpuRegister(), offs));
+}
+
+void X86_64JNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset64 offs) {
+ X86_64ManagedRegister dest = mdest.AsX86_64();
+ CHECK(dest.IsCpuRegister());
+ __ gs()->movq(dest.AsCpuRegister(), Address::Absolute(offs, true));
+}
+
+void X86_64JNIMacroAssembler::SignExtend(ManagedRegister mreg, size_t size) {
+ X86_64ManagedRegister reg = mreg.AsX86_64();
+ CHECK(size == 1 || size == 2) << size;
+ CHECK(reg.IsCpuRegister()) << reg;
+ if (size == 1) {
+ __ movsxb(reg.AsCpuRegister(), reg.AsCpuRegister());
+ } else {
+ __ movsxw(reg.AsCpuRegister(), reg.AsCpuRegister());
+ }
+}
+
+void X86_64JNIMacroAssembler::ZeroExtend(ManagedRegister mreg, size_t size) {
+ X86_64ManagedRegister reg = mreg.AsX86_64();
+ CHECK(size == 1 || size == 2) << size;
+ CHECK(reg.IsCpuRegister()) << reg;
+ if (size == 1) {
+ __ movzxb(reg.AsCpuRegister(), reg.AsCpuRegister());
+ } else {
+ __ movzxw(reg.AsCpuRegister(), reg.AsCpuRegister());
+ }
+}
+
+void X86_64JNIMacroAssembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) {
+ X86_64ManagedRegister dest = mdest.AsX86_64();
+ X86_64ManagedRegister src = msrc.AsX86_64();
+ if (!dest.Equals(src)) {
+ if (dest.IsCpuRegister() && src.IsCpuRegister()) {
+ __ movq(dest.AsCpuRegister(), src.AsCpuRegister());
+ } else if (src.IsX87Register() && dest.IsXmmRegister()) {
+ // Pass via stack and pop X87 register
+      __ subq(CpuRegister(RSP), Immediate(16));  // Use a 64-bit adjustment to match the addq below.
+ if (size == 4) {
+ CHECK_EQ(src.AsX87Register(), ST0);
+ __ fstps(Address(CpuRegister(RSP), 0));
+ __ movss(dest.AsXmmRegister(), Address(CpuRegister(RSP), 0));
+ } else {
+ CHECK_EQ(src.AsX87Register(), ST0);
+ __ fstpl(Address(CpuRegister(RSP), 0));
+ __ movsd(dest.AsXmmRegister(), Address(CpuRegister(RSP), 0));
+ }
+ __ addq(CpuRegister(RSP), Immediate(16));
+ } else {
+ // TODO: x87, SSE
+ UNIMPLEMENTED(FATAL) << ": Move " << dest << ", " << src;
+ }
+ }
+}
+
+void X86_64JNIMacroAssembler::CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) {
+ X86_64ManagedRegister scratch = mscratch.AsX86_64();
+ CHECK(scratch.IsCpuRegister());
+ __ movl(scratch.AsCpuRegister(), Address(CpuRegister(RSP), src));
+ __ movl(Address(CpuRegister(RSP), dest), scratch.AsCpuRegister());
+}
+
+void X86_64JNIMacroAssembler::CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset64 thr_offs,
+ ManagedRegister mscratch) {
+ X86_64ManagedRegister scratch = mscratch.AsX86_64();
+ CHECK(scratch.IsCpuRegister());
+ __ gs()->movq(scratch.AsCpuRegister(), Address::Absolute(thr_offs, true));
+ Store(fr_offs, scratch, 8);
+}
+
+void X86_64JNIMacroAssembler::CopyRawPtrToThread(ThreadOffset64 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
+ X86_64ManagedRegister scratch = mscratch.AsX86_64();
+ CHECK(scratch.IsCpuRegister());
+ Load(scratch, fr_offs, 8);
+ __ gs()->movq(Address::Absolute(thr_offs, true), scratch.AsCpuRegister());
+}
+
+void X86_64JNIMacroAssembler::Copy(FrameOffset dest,
+ FrameOffset src,
+ ManagedRegister mscratch,
+ size_t size) {
+ X86_64ManagedRegister scratch = mscratch.AsX86_64();
+ if (scratch.IsCpuRegister() && size == 8) {
+ Load(scratch, src, 4);
+ Store(dest, scratch, 4);
+ Load(scratch, FrameOffset(src.Int32Value() + 4), 4);
+ Store(FrameOffset(dest.Int32Value() + 4), scratch, 4);
+ } else {
+ Load(scratch, src, size);
+ Store(dest, scratch, size);
+ }
+}
+
+void X86_64JNIMacroAssembler::Copy(FrameOffset /*dst*/,
+ ManagedRegister /*src_base*/,
+ Offset /*src_offset*/,
+ ManagedRegister /*scratch*/,
+ size_t /*size*/) {
+ UNIMPLEMENTED(FATAL);
+}
+
+void X86_64JNIMacroAssembler::Copy(ManagedRegister dest_base,
+ Offset dest_offset,
+ FrameOffset src,
+ ManagedRegister scratch,
+ size_t size) {
+ CHECK(scratch.IsNoRegister());
+ CHECK_EQ(size, 4u);
+ __ pushq(Address(CpuRegister(RSP), src));
+ __ popq(Address(dest_base.AsX86_64().AsCpuRegister(), dest_offset));
+}
+
+void X86_64JNIMacroAssembler::Copy(FrameOffset dest,
+ FrameOffset src_base,
+ Offset src_offset,
+ ManagedRegister mscratch,
+ size_t size) {
+ CpuRegister scratch = mscratch.AsX86_64().AsCpuRegister();
+ CHECK_EQ(size, 4u);
+ __ movq(scratch, Address(CpuRegister(RSP), src_base));
+ __ movq(scratch, Address(scratch, src_offset));
+ __ movq(Address(CpuRegister(RSP), dest), scratch);
+}
+
+void X86_64JNIMacroAssembler::Copy(ManagedRegister dest,
+ Offset dest_offset,
+ ManagedRegister src,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) {
+ CHECK_EQ(size, 4u);
+ CHECK(scratch.IsNoRegister());
+ __ pushq(Address(src.AsX86_64().AsCpuRegister(), src_offset));
+ __ popq(Address(dest.AsX86_64().AsCpuRegister(), dest_offset));
+}
+
+void X86_64JNIMacroAssembler::Copy(FrameOffset dest,
+ Offset dest_offset,
+ FrameOffset src,
+ Offset src_offset,
+ ManagedRegister mscratch,
+ size_t size) {
+ CpuRegister scratch = mscratch.AsX86_64().AsCpuRegister();
+ CHECK_EQ(size, 4u);
+ CHECK_EQ(dest.Int32Value(), src.Int32Value());
+ __ movq(scratch, Address(CpuRegister(RSP), src));
+ __ pushq(Address(scratch, src_offset));
+ __ popq(Address(scratch, dest_offset));
+}
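+
+// Note on the Copy overloads above: pushq/popq with memory operands perform a
+// memory-to-memory copy without consuming a second scratch register.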
+
+void X86_64JNIMacroAssembler::MemoryBarrier(ManagedRegister) {
+ __ mfence();
+}
+
+void X86_64JNIMacroAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
+ FrameOffset handle_scope_offset,
+ ManagedRegister min_reg,
+ bool null_allowed) {
+ X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
+ X86_64ManagedRegister in_reg = min_reg.AsX86_64();
+ if (in_reg.IsNoRegister()) { // TODO(64): && null_allowed
+ // Use out_reg as indicator of null.
+ in_reg = out_reg;
+ // TODO: movzwl
+ __ movl(in_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
+ }
+ CHECK(in_reg.IsCpuRegister());
+ CHECK(out_reg.IsCpuRegister());
+ VerifyObject(in_reg, null_allowed);
+ if (null_allowed) {
+ Label null_arg;
+ if (!out_reg.Equals(in_reg)) {
+ __ xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
+ }
+ __ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
+ __ j(kZero, &null_arg);
+ __ leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
+ __ Bind(&null_arg);
+ } else {
+ __ leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
+ }
+}
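+
+// As on x86, the null_allowed path above emits roughly (sketch only):
+//   xorl  out, out        ; only when out != in
+//   testl in, in
+//   jz    null_arg
+//   leaq  out, [RSP + handle_scope_offset]
+// null_arg: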
+
+void X86_64JNIMacroAssembler::CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handle_scope_offset,
+ ManagedRegister mscratch,
+ bool null_allowed) {
+ X86_64ManagedRegister scratch = mscratch.AsX86_64();
+ CHECK(scratch.IsCpuRegister());
+ if (null_allowed) {
+ Label null_arg;
+ __ movl(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
+ __ testl(scratch.AsCpuRegister(), scratch.AsCpuRegister());
+ __ j(kZero, &null_arg);
+ __ leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
+ __ Bind(&null_arg);
+ } else {
+ __ leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
+ }
+ Store(out_off, scratch, 8);
+}
+
+// Given a handle scope entry, load the associated reference.
+void X86_64JNIMacroAssembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
+ ManagedRegister min_reg) {
+ X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
+ X86_64ManagedRegister in_reg = min_reg.AsX86_64();
+ CHECK(out_reg.IsCpuRegister());
+ CHECK(in_reg.IsCpuRegister());
+ Label null_arg;
+ if (!out_reg.Equals(in_reg)) {
+ __ xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
+ }
+ __ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
+ __ j(kZero, &null_arg);
+ __ movq(out_reg.AsCpuRegister(), Address(in_reg.AsCpuRegister(), 0));
+ __ Bind(&null_arg);
+}
+
+void X86_64JNIMacroAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
+ // TODO: not validating references
+}
+
+void X86_64JNIMacroAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
+ // TODO: not validating references
+}
+
+void X86_64JNIMacroAssembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister) {
+ X86_64ManagedRegister base = mbase.AsX86_64();
+ CHECK(base.IsCpuRegister());
+ __ call(Address(base.AsCpuRegister(), offset.Int32Value()));
+ // TODO: place reference map on call
+}
+
+void X86_64JNIMacroAssembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
+ CpuRegister scratch = mscratch.AsX86_64().AsCpuRegister();
+ __ movq(scratch, Address(CpuRegister(RSP), base));
+ __ call(Address(scratch, offset));
+}
+
+void X86_64JNIMacroAssembler::CallFromThread(ThreadOffset64 offset, ManagedRegister /*mscratch*/) {
+ __ gs()->call(Address::Absolute(offset, true));
+}
+
+void X86_64JNIMacroAssembler::GetCurrentThread(ManagedRegister tr) {
+ __ gs()->movq(tr.AsX86_64().AsCpuRegister(),
+ Address::Absolute(Thread::SelfOffset<kX86_64PointerSize>(), true));
+}
+
+void X86_64JNIMacroAssembler::GetCurrentThread(FrameOffset offset, ManagedRegister mscratch) {
+ X86_64ManagedRegister scratch = mscratch.AsX86_64();
+ __ gs()->movq(scratch.AsCpuRegister(),
+ Address::Absolute(Thread::SelfOffset<kX86_64PointerSize>(), true));
+ __ movq(Address(CpuRegister(RSP), offset), scratch.AsCpuRegister());
+}
+
+// Slow path entered when Thread::Current()->exception_ is non-null.
+class X86_64ExceptionSlowPath FINAL : public SlowPath {
+ public:
+ explicit X86_64ExceptionSlowPath(size_t stack_adjust) : stack_adjust_(stack_adjust) {}
+ virtual void Emit(Assembler *sp_asm) OVERRIDE;
+ private:
+ const size_t stack_adjust_;
+};
+
+void X86_64JNIMacroAssembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) {
+ X86_64ExceptionSlowPath* slow = new (__ GetArena()) X86_64ExceptionSlowPath(stack_adjust);
+ __ GetBuffer()->EnqueueSlowPath(slow);
+ __ gs()->cmpl(Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>(), true),
+               Immediate(0));
+ __ j(kNotEqual, slow->Entry());
+}
+
+#undef __
+
+void X86_64ExceptionSlowPath::Emit(Assembler *sasm) {
+ X86_64Assembler* sp_asm = down_cast<X86_64Assembler*>(sasm);
+#define __ sp_asm->
+ __ Bind(&entry_);
+ // Note: the return value is dead
+ if (stack_adjust_ != 0) { // Fix up the frame.
+ DecreaseFrameSizeImpl(stack_adjust_, sp_asm);
+ }
+ // Pass exception as argument in RDI
+ __ gs()->movq(CpuRegister(RDI),
+ Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>(), true));
+ __ gs()->call(
+ Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64PointerSize, pDeliverException), true));
+ // This call should never return.
+ __ int3();
+#undef __
+}
+
+} // namespace x86_64
+} // namespace art
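
The two CreateHandleScopeEntry overloads above emit the same decision: when null_allowed, the output is either null (if the handle scope slot holds a null reference) or the address of the slot itself. A minimal C++ sketch of the value the emitted code computes; the helper name is hypothetical, not part of the assembler:

    // Hypothetical sketch: the value materialized by CreateHandleScopeEntry.
    // 'slot' stands for the handle scope entry at handle_scope_offset.
    static inline void* HandleScopeEntryValue(void** slot, bool null_allowed) {
      if (null_allowed && *slot == nullptr) {
        return nullptr;  // A null reference is passed to JNI as a null jobject.
      }
      return slot;       // Otherwise pass the address of the slot (an Object**).
    }
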
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
new file mode 100644
index 0000000..cc4e57c
--- /dev/null
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_X86_64_JNI_MACRO_ASSEMBLER_X86_64_H_
+#define ART_COMPILER_UTILS_X86_64_JNI_MACRO_ASSEMBLER_X86_64_H_
+
+#include <vector>
+
+#include "assembler_x86_64.h"
+#include "base/arena_containers.h"
+#include "base/enums.h"
+#include "base/macros.h"
+#include "offsets.h"
+#include "utils/array_ref.h"
+#include "utils/assembler.h"
+#include "utils/jni_macro_assembler.h"
+
+namespace art {
+namespace x86_64 {
+
+class X86_64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<X86_64Assembler,
+ PointerSize::k64> {
+ public:
+ explicit X86_64JNIMacroAssembler(ArenaAllocator* arena)
+ : JNIMacroAssemblerFwd<X86_64Assembler, PointerSize::k64>(arena) {}
+ virtual ~X86_64JNIMacroAssembler() {}
+
+ //
+ // Overridden common assembler high-level functionality
+ //
+
+ // Emit code that will create an activation on the stack
+ void BuildFrame(size_t frame_size,
+ ManagedRegister method_reg,
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
+
+ // Emit code that will remove an activation from the stack
+ void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs)
+ OVERRIDE;
+
+ void IncreaseFrameSize(size_t adjust) OVERRIDE;
+ void DecreaseFrameSize(size_t adjust) OVERRIDE;
+
+ // Store routines
+ void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
+ void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
+ void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
+
+ void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
+
+ void StoreStackOffsetToThread(ThreadOffset64 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister scratch) OVERRIDE;
+
+ void StoreStackPointerToThread(ThreadOffset64 thr_offs) OVERRIDE;
+
+ void StoreSpanning(FrameOffset dest,
+ ManagedRegister src,
+ FrameOffset in_off,
+ ManagedRegister scratch) OVERRIDE;
+
+ // Load routines
+ void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
+
+ void LoadFromThread(ManagedRegister dest, ThreadOffset64 src, size_t size) OVERRIDE;
+
+ void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+
+ void LoadRef(ManagedRegister dest,
+ ManagedRegister base,
+ MemberOffset offs,
+ bool unpoison_reference) OVERRIDE;
+
+ void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
+
+ void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset64 offs) OVERRIDE;
+
+ // Copying routines
+ void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
+
+ void CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset64 thr_offs,
+ ManagedRegister scratch) OVERRIDE;
+
+ void CopyRawPtrToThread(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
+ OVERRIDE;
+
+ void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
+
+ void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
+
+ void Copy(FrameOffset dest,
+ ManagedRegister src_base,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) OVERRIDE;
+
+ void Copy(ManagedRegister dest_base,
+ Offset dest_offset,
+ FrameOffset src,
+ ManagedRegister scratch,
+ size_t size) OVERRIDE;
+
+ void Copy(FrameOffset dest,
+ FrameOffset src_base,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) OVERRIDE;
+
+ void Copy(ManagedRegister dest,
+ Offset dest_offset,
+ ManagedRegister src,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) OVERRIDE;
+
+ void Copy(FrameOffset dest,
+ Offset dest_offset,
+ FrameOffset src,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) OVERRIDE;
+
+ void MemoryBarrier(ManagedRegister) OVERRIDE;
+
+ // Sign extension
+ void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+
+ // Zero extension
+ void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+
+ // Exploit fast access in managed code to Thread::Current()
+ void GetCurrentThread(ManagedRegister tr) OVERRIDE;
+ void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
+
+ // Set up out_reg to hold an Object** into the handle scope, or to be null if the
+ // value is null and null_allowed. in_reg holds a possibly stale reference
+ // that can be used to avoid loading the handle scope entry to see if the value is
+ // null.
+ void CreateHandleScopeEntry(ManagedRegister out_reg,
+ FrameOffset handlescope_offset,
+ ManagedRegister in_reg,
+ bool null_allowed) OVERRIDE;
+
+ // Set up out_off to hold an Object** into the handle scope, or to be null if the
+ // value is null and null_allowed.
+ void CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handlescope_offset,
+ ManagedRegister scratch,
+ bool null_allowed) OVERRIDE;
+
+ // src holds a handle scope entry (Object**) load this into dst
+ virtual void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+
+ // Heap::VerifyObject on src. In some cases (such as a reference to this) we
+ // know that src cannot be null.
+ void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
+ void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
+
+ // Call to address held at [base+offset]
+ void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
+ void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
+ void CallFromThread(ThreadOffset64 offset, ManagedRegister scratch) OVERRIDE;
+
+ // Generate code to check if Thread::Current()->exception_ is non-null
+ // and branch to a ExceptionSlowPath if it is.
+ void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(X86_64JNIMacroAssembler);
+};
+
+} // namespace x86_64
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_X86_64_JNI_MACRO_ASSEMBLER_X86_64_H_
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 0d1d4d7..eb11f6d 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -516,6 +516,7 @@
compiled_classes_filename_(nullptr),
compiled_methods_zip_filename_(nullptr),
compiled_methods_filename_(nullptr),
+ passes_to_run_filename_(nullptr),
app_image_(false),
boot_image_(false),
multi_image_(false),
@@ -894,6 +895,16 @@
}
}
compiler_options_->force_determinism_ = force_determinism_;
+
+ if (passes_to_run_filename_ != nullptr) {
+ passes_to_run_.reset(ReadCommentedInputFromFile<std::vector<std::string>>(
+ passes_to_run_filename_,
+ nullptr)); // No post-processing.
+ if (passes_to_run_.get() == nullptr) {
+ Usage("Failed to read list of passes to run.");
+ }
+ }
+ compiler_options_->passes_to_run_ = passes_to_run_.get();
}
static bool SupportsDeterministicCompilation() {
@@ -1093,6 +1104,8 @@
compiled_methods_filename_ = option.substr(strlen("--compiled-methods=")).data();
} else if (option.starts_with("--compiled-methods-zip=")) {
compiled_methods_zip_filename_ = option.substr(strlen("--compiled-methods-zip=")).data();
+ } else if (option.starts_with("--run-passes=")) {
+ passes_to_run_filename_ = option.substr(strlen("--run-passes=")).data();
} else if (option.starts_with("--base=")) {
ParseBase(option);
} else if (option.starts_with("--boot-image=")) {
@@ -2106,13 +2119,15 @@
if (compiled_methods_filename_ != nullptr) {
std::string error_msg;
if (compiled_methods_zip_filename_ != nullptr) {
- compiled_methods_.reset(ReadCommentedInputFromZip(compiled_methods_zip_filename_,
- compiled_methods_filename_,
- nullptr, // No post-processing.
- &error_msg));
+ compiled_methods_.reset(ReadCommentedInputFromZip<std::unordered_set<std::string>>(
+ compiled_methods_zip_filename_,
+ compiled_methods_filename_,
+ nullptr, // No post-processing.
+ &error_msg));
} else {
- compiled_methods_.reset(ReadCommentedInputFromFile(compiled_methods_filename_,
- nullptr)); // No post-processing.
+ compiled_methods_.reset(ReadCommentedInputFromFile<std::unordered_set<std::string>>(
+ compiled_methods_filename_,
+ nullptr)); // No post-processing.
}
if (compiled_methods_.get() == nullptr) {
LOG(ERROR) << "Failed to create list of compiled methods from '"
@@ -2346,7 +2361,8 @@
static std::unordered_set<std::string>* ReadImageClassesFromFile(
const char* image_classes_filename) {
std::function<std::string(const char*)> process = DotToDescriptor;
- return ReadCommentedInputFromFile(image_classes_filename, &process);
+ return ReadCommentedInputFromFile<std::unordered_set<std::string>>(image_classes_filename,
+ &process);
}
// Reads the class names (java.lang.Object) and returns a set of descriptors (Ljava/lang/Object;)
@@ -2355,27 +2371,32 @@
const char* image_classes_filename,
std::string* error_msg) {
std::function<std::string(const char*)> process = DotToDescriptor;
- return ReadCommentedInputFromZip(zip_filename, image_classes_filename, &process, error_msg);
+ return ReadCommentedInputFromZip<std::unordered_set<std::string>>(zip_filename,
+ image_classes_filename,
+ &process,
+ error_msg);
}
// Read lines from the given file, dropping comments and empty lines. Post-process each line with
// the given function.
- static std::unordered_set<std::string>* ReadCommentedInputFromFile(
+ template <typename T>
+ static T* ReadCommentedInputFromFile(
const char* input_filename, std::function<std::string(const char*)>* process) {
std::unique_ptr<std::ifstream> input_file(new std::ifstream(input_filename, std::ifstream::in));
if (input_file.get() == nullptr) {
LOG(ERROR) << "Failed to open input file " << input_filename;
return nullptr;
}
- std::unique_ptr<std::unordered_set<std::string>> result(
- ReadCommentedInputStream(*input_file, process));
+ std::unique_ptr<T> result(
+ ReadCommentedInputStream<T>(*input_file, process));
input_file->close();
return result.release();
}
// Read lines from the given file from the given zip file, dropping comments and empty lines.
// Post-process each line with the given function.
- static std::unordered_set<std::string>* ReadCommentedInputFromZip(
+ template <typename T>
+ static T* ReadCommentedInputFromZip(
const char* zip_filename,
const char* input_filename,
std::function<std::string(const char*)>* process,
@@ -2401,16 +2422,16 @@
const std::string input_string(reinterpret_cast<char*>(input_file->Begin()),
input_file->Size());
std::istringstream input_stream(input_string);
- return ReadCommentedInputStream(input_stream, process);
+ return ReadCommentedInputStream<T>(input_stream, process);
}
// Read lines from the given stream, dropping comments and empty lines. Post-process each line
// with the given function.
- static std::unordered_set<std::string>* ReadCommentedInputStream(
+ template <typename T>
+ static T* ReadCommentedInputStream(
std::istream& in_stream,
std::function<std::string(const char*)>* process) {
- std::unique_ptr<std::unordered_set<std::string>> image_classes(
- new std::unordered_set<std::string>);
+ std::unique_ptr<T> output(new T());
while (in_stream.good()) {
std::string dot;
std::getline(in_stream, dot);
@@ -2419,12 +2440,12 @@
}
if (process != nullptr) {
std::string descriptor((*process)(dot.c_str()));
- image_classes->insert(descriptor);
+ output->insert(output->end(), descriptor);
} else {
- image_classes->insert(dot);
+ output->insert(output->end(), dot);
}
}
- return image_classes.release();
+ return output.release();
}
void LogCompletionTime() {
@@ -2501,9 +2522,11 @@
const char* compiled_classes_filename_;
const char* compiled_methods_zip_filename_;
const char* compiled_methods_filename_;
+ const char* passes_to_run_filename_;
std::unique_ptr<std::unordered_set<std::string>> image_classes_;
std::unique_ptr<std::unordered_set<std::string>> compiled_classes_;
std::unique_ptr<std::unordered_set<std::string>> compiled_methods_;
+ std::unique_ptr<std::vector<std::string>> passes_to_run_;
bool app_image_;
bool boot_image_;
bool multi_image_;
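
The templated readers above rely on both std::unordered_set<std::string> and std::vector<std::string> providing insert(iterator, value): the end() hint appends in order for the vector, while the set ignores the position and deduplicates. A condensed sketch of the pattern, simplified (no zip handling, and assuming '#'-prefixed comment lines as in the elided condition):

    #include <istream>
    #include <memory>
    #include <string>

    // T is std::unordered_set<std::string> (image/compiled-method lists) or
    // std::vector<std::string> (--run-passes, where order matters).
    template <typename T>
    T* ReadCommentedLines(std::istream& in) {
      std::unique_ptr<T> output(new T());
      std::string line;
      while (std::getline(in, line)) {
        if (line.empty() || line[0] == '#') {
          continue;  // Drop comments and empty lines.
        }
        output->insert(output->end(), line);  // Appends for vector, dedups for set.
      }
      return output.release();
    }
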
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 8c3c5e5..a0def61 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -76,6 +76,7 @@
"kCalleeSaveMethod",
"kRefsOnlySaveMethod",
"kRefsAndArgsSaveMethod",
+ "kSaveEverythingMethod",
};
const char* image_roots_descriptions_[] = {
diff --git a/oatdump/oatdump_test.cc b/oatdump/oatdump_test.cc
index 63dc476..db97055 100644
--- a/oatdump/oatdump_test.cc
+++ b/oatdump/oatdump_test.cc
@@ -104,11 +104,13 @@
// We must set --android-root.
int link[2];
if (pipe(link) == -1) {
+ *error_msg = strerror(errno);
return false;
}
const pid_t pid = fork();
if (pid == -1) {
+ *error_msg = strerror(errno);
return false;
}
@@ -116,10 +118,19 @@
dup2(link[1], STDOUT_FILENO);
close(link[0]);
close(link[1]);
- bool res = ::art::Exec(exec_argv, error_msg);
- // Delete the runtime to prevent memory leaks and please valgrind.
- delete Runtime::Current();
- exit(res ? 0 : 1);
+ // Change the process group so we don't get reaped by ProcessManager.
+ setpgid(0, 0);
+ // Use execv here rather than art::Exec to avoid blocking on waitpid.
+ std::vector<char*> argv;
+ for (size_t i = 0; i < exec_argv.size(); ++i) {
+ argv.push_back(const_cast<char*>(exec_argv[i].c_str()));
+ }
+ argv.push_back(nullptr);
+ UNUSED(execv(argv[0], &argv[0]));
+ const std::string command_line(Join(exec_argv, ' '));
+ PLOG(ERROR) << "Failed to execv(" << command_line << ")";
+ // _exit to avoid atexit handlers in child.
+ _exit(1);
} else {
close(link[1]);
static const size_t kLineMax = 256;
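
The child branch above replaces art::Exec (which forks again and blocks in waitpid) with a direct execv after moving to a fresh process group. A condensed sketch of the same pipe-capture pattern, with error handling trimmed:

    #include <sys/types.h>
    #include <unistd.h>
    #include <string>
    #include <vector>

    // Sketch: run a program with its stdout captured through a pipe.
    bool RunWithPipedStdout(const std::vector<std::string>& exec_argv) {
      int link[2];
      if (pipe(link) == -1) return false;
      const pid_t pid = fork();
      if (pid == -1) return false;
      if (pid == 0) {
        dup2(link[1], STDOUT_FILENO);  // Child: stdout goes into the pipe.
        close(link[0]);
        close(link[1]);
        setpgid(0, 0);                 // Detach from the parent's process group.
        std::vector<char*> argv;
        for (const std::string& arg : exec_argv) {
          argv.push_back(const_cast<char*>(arg.c_str()));
        }
        argv.push_back(nullptr);
        execv(argv[0], &argv[0]);      // Only returns on failure.
        _exit(1);                      // Skip atexit handlers in the child.
      }
      close(link[1]);
      // Parent: read link[0] until EOF, then reap the child with waitpid.
      return true;
    }
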
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 5c94862..2f8b113 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -127,6 +127,7 @@
mirror/throwable.cc \
monitor.cc \
native_bridge_art_interface.cc \
+ native_stack_dump.cc \
native/dalvik_system_DexFile.cc \
native/dalvik_system_VMDebug.cc \
native/dalvik_system_VMRuntime.cc \
diff --git a/runtime/arch/arch_test.cc b/runtime/arch/arch_test.cc
index ee31c58..6d80eb6 100644
--- a/runtime/arch/arch_test.cc
+++ b/runtime/arch/arch_test.cc
@@ -69,7 +69,9 @@
#undef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
static constexpr size_t kFrameSizeRefsAndArgsCalleeSave = FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE;
#undef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
-}
+static constexpr size_t kFrameSizeSaveEverythingCalleeSave = FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE;
+#undef FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE
+} // namespace arm
namespace arm64 {
#include "arch/arm64/asm_support_arm64.h"
@@ -79,7 +81,9 @@
#undef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
static constexpr size_t kFrameSizeRefsAndArgsCalleeSave = FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE;
#undef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
-}
+static constexpr size_t kFrameSizeSaveEverythingCalleeSave = FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE;
+#undef FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE
+} // namespace arm64
namespace mips {
#include "arch/mips/asm_support_mips.h"
@@ -89,7 +93,9 @@
#undef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
static constexpr size_t kFrameSizeRefsAndArgsCalleeSave = FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE;
#undef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
-}
+static constexpr size_t kFrameSizeSaveEverythingCalleeSave = FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE;
+#undef FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE
+} // namespace mips
namespace mips64 {
#include "arch/mips64/asm_support_mips64.h"
@@ -99,7 +105,9 @@
#undef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
static constexpr size_t kFrameSizeRefsAndArgsCalleeSave = FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE;
#undef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
-}
+static constexpr size_t kFrameSizeSaveEverythingCalleeSave = FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE;
+#undef FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE
+} // namespace mips64
namespace x86 {
#include "arch/x86/asm_support_x86.h"
@@ -109,7 +117,9 @@
#undef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
static constexpr size_t kFrameSizeRefsAndArgsCalleeSave = FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE;
#undef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
-}
+static constexpr size_t kFrameSizeSaveEverythingCalleeSave = FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE;
+#undef FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE
+} // namespace x86
namespace x86_64 {
#include "arch/x86_64/asm_support_x86_64.h"
@@ -119,13 +129,18 @@
#undef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
static constexpr size_t kFrameSizeRefsAndArgsCalleeSave = FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE;
#undef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
-}
+static constexpr size_t kFrameSizeSaveEverythingCalleeSave = FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE;
+#undef FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE
+} // namespace x86_64
// Check architecture specific constants are sound.
TEST_F(ArchTest, ARM) {
CheckFrameSize(InstructionSet::kArm, Runtime::kSaveAll, arm::kFrameSizeSaveAllCalleeSave);
CheckFrameSize(InstructionSet::kArm, Runtime::kRefsOnly, arm::kFrameSizeRefsOnlyCalleeSave);
CheckFrameSize(InstructionSet::kArm, Runtime::kRefsAndArgs, arm::kFrameSizeRefsAndArgsCalleeSave);
+ CheckFrameSize(InstructionSet::kArm,
+ Runtime::kSaveEverything,
+ arm::kFrameSizeSaveEverythingCalleeSave);
}
@@ -134,33 +149,51 @@
CheckFrameSize(InstructionSet::kArm64, Runtime::kRefsOnly, arm64::kFrameSizeRefsOnlyCalleeSave);
CheckFrameSize(InstructionSet::kArm64, Runtime::kRefsAndArgs,
arm64::kFrameSizeRefsAndArgsCalleeSave);
+ CheckFrameSize(InstructionSet::kArm64,
+ Runtime::kSaveEverything,
+ arm64::kFrameSizeSaveEverythingCalleeSave);
}
TEST_F(ArchTest, MIPS) {
CheckFrameSize(InstructionSet::kMips, Runtime::kSaveAll, mips::kFrameSizeSaveAllCalleeSave);
CheckFrameSize(InstructionSet::kMips, Runtime::kRefsOnly, mips::kFrameSizeRefsOnlyCalleeSave);
- CheckFrameSize(InstructionSet::kMips, Runtime::kRefsAndArgs,
+ CheckFrameSize(InstructionSet::kMips,
+ Runtime::kRefsAndArgs,
mips::kFrameSizeRefsAndArgsCalleeSave);
+ CheckFrameSize(InstructionSet::kMips,
+ Runtime::kSaveEverything,
+ mips::kFrameSizeSaveEverythingCalleeSave);
}
TEST_F(ArchTest, MIPS64) {
CheckFrameSize(InstructionSet::kMips64, Runtime::kSaveAll, mips64::kFrameSizeSaveAllCalleeSave);
CheckFrameSize(InstructionSet::kMips64, Runtime::kRefsOnly, mips64::kFrameSizeRefsOnlyCalleeSave);
- CheckFrameSize(InstructionSet::kMips64, Runtime::kRefsAndArgs,
+ CheckFrameSize(InstructionSet::kMips64,
+ Runtime::kRefsAndArgs,
mips64::kFrameSizeRefsAndArgsCalleeSave);
+ CheckFrameSize(InstructionSet::kMips64,
+ Runtime::kSaveEverything,
+ mips64::kFrameSizeSaveEverythingCalleeSave);
}
TEST_F(ArchTest, X86) {
CheckFrameSize(InstructionSet::kX86, Runtime::kSaveAll, x86::kFrameSizeSaveAllCalleeSave);
CheckFrameSize(InstructionSet::kX86, Runtime::kRefsOnly, x86::kFrameSizeRefsOnlyCalleeSave);
CheckFrameSize(InstructionSet::kX86, Runtime::kRefsAndArgs, x86::kFrameSizeRefsAndArgsCalleeSave);
+ CheckFrameSize(InstructionSet::kX86,
+ Runtime::kSaveEverything,
+ x86::kFrameSizeSaveEverythingCalleeSave);
}
TEST_F(ArchTest, X86_64) {
CheckFrameSize(InstructionSet::kX86_64, Runtime::kSaveAll, x86_64::kFrameSizeSaveAllCalleeSave);
CheckFrameSize(InstructionSet::kX86_64, Runtime::kRefsOnly, x86_64::kFrameSizeRefsOnlyCalleeSave);
- CheckFrameSize(InstructionSet::kX86_64, Runtime::kRefsAndArgs,
+ CheckFrameSize(InstructionSet::kX86_64,
+ Runtime::kRefsAndArgs,
x86_64::kFrameSizeRefsAndArgsCalleeSave);
+ CheckFrameSize(InstructionSet::kX86_64,
+ Runtime::kSaveEverything,
+ x86_64::kFrameSizeSaveEverythingCalleeSave);
}
} // namespace art
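
Each per-arch block above pairs a FRAME_SIZE_* constant from asm_support with a kFrameSize* constant that the test compares against the runtime's computed frame size. The same arithmetic can also be sanity-checked at compile time; an illustrative sketch using the ARM numbers from the macro below:

    // Sketch: the ARM kSaveEverything frame is 14 GPR words (56 bytes) plus
    // 32 FP words (128 bytes) plus padding and Method* (8 bytes) = 192 bytes.
    static_assert(56 + 128 + 8 == 192, "ARM SaveEverything frame layout drifted");
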
diff --git a/runtime/arch/arm/asm_support_arm.h b/runtime/arch/arm/asm_support_arm.h
index 1fa566b..67f6f7a 100644
--- a/runtime/arch/arm/asm_support_arm.h
+++ b/runtime/arch/arm/asm_support_arm.h
@@ -22,6 +22,7 @@
#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVE 112
#define FRAME_SIZE_REFS_ONLY_CALLEE_SAVE 32
#define FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE 112
+#define FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE 192
// Flag for enabling R4 optimization in arm runtime
// #define ARM_R4_SUSPEND_FLAG
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 34d3158..42418ad 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -168,6 +168,65 @@
.cfi_adjust_cfa_offset -40
.endm
+ /*
+ * Macro that sets up the callee save frame to conform with
+ * Runtime::CreateCalleeSaveMethod(kSaveEverything)
+ */
+.macro SETUP_SAVE_EVERYTHING_CALLEE_SAVE_FRAME rTemp
+ push {r0-r12, lr} @ 14 words of callee saves and args.
+ .cfi_adjust_cfa_offset 56
+ .cfi_rel_offset r0, 0
+ .cfi_rel_offset r1, 4
+ .cfi_rel_offset r2, 8
+ .cfi_rel_offset r3, 12
+ .cfi_rel_offset r4, 16
+ .cfi_rel_offset r5, 20
+ .cfi_rel_offset r6, 24
+ .cfi_rel_offset r7, 28
+ .cfi_rel_offset r8, 32
+ .cfi_rel_offset r9, 36
+ .cfi_rel_offset r10, 40
+ .cfi_rel_offset r11, 44
+ .cfi_rel_offset ip, 48
+ .cfi_rel_offset lr, 52
+ vpush {s0-s31} @ 32 words of float args.
+ .cfi_adjust_cfa_offset 128
+ sub sp, #8 @ 2 words of space, alignment padding and Method*
+ .cfi_adjust_cfa_offset 8
+ RUNTIME_CURRENT1 \rTemp @ Load Runtime::Current into rTemp.
+ @ Load kSaveEverything Method* to rTemp.
+ ldr \rTemp, [\rTemp, #RUNTIME_SAVE_EVERYTHING_CALLEE_SAVE_FRAME_OFFSET]
+ str \rTemp, [sp, #0] @ Store kSaveEverything Method* to the bottom of the stack.
+ str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
+
+ // Ugly compile-time check, but we only have the preprocessor.
+#if (FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE != 56 + 128 + 8)
+#error "SAVE_EVERYTHING_CALLEE_SAVE_FRAME(ARM) size not as expected."
+#endif
+.endm
+
+.macro RESTORE_SAVE_EVERYTHING_CALLEE_SAVE_FRAME
+ add sp, #8 @ rewind sp
+ .cfi_adjust_cfa_offset -8
+ vpop {s0-s31}
+ .cfi_adjust_cfa_offset -128
+ pop {r0-r12, lr} @ 14 words of callee saves
+ .cfi_restore r0
+ .cfi_restore r1
+ .cfi_restore r2
+ .cfi_restore r3
+ .cfi_restore r4
+ .cfi_restore r5
+ .cfi_restore r6
+ .cfi_restore r7
+ .cfi_restore r8
+ .cfi_restore r9
+ .cfi_restore r10
+ .cfi_restore r11
+ .cfi_restore r12
+ .cfi_restore lr
+ .cfi_adjust_cfa_offset -56
+.endm
+
.macro RETURN_IF_RESULT_IS_ZERO
cbnz r0, 1f @ result non-zero branch over
bx lr @ return
@@ -520,7 +579,7 @@
ldr r2, [r9, #THREAD_ID_OFFSET]
ldrex r1, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
mov r3, r1
- and r3, #LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED @ zero the read barrier bits
+ and r3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED @ zero the gc bits
cbnz r3, .Lnot_unlocked @ already thin locked
@ unlocked case - r1: original lock word that's zero except for the read barrier bits.
orr r2, r1, r2 @ r2 holds thread id with count of 0 with preserved read barrier bits
@@ -536,9 +595,9 @@
cbnz r2, .Lslow_lock @ lock word and self thread id's match -> recursive lock
@ else contention, go to slow path
mov r3, r1 @ copy the lock word to check count overflow.
- and r3, #LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED @ zero the read barrier bits.
+ and r3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED @ zero the gc bits.
add r2, r3, #LOCK_WORD_THIN_LOCK_COUNT_ONE @ increment count in lock word placing in r2 to check overflow
- lsr r3, r2, LOCK_WORD_READ_BARRIER_STATE_SHIFT @ if either of the upper two bits (28-29) are set, we overflowed.
+ lsr r3, r2, #LOCK_WORD_GC_STATE_SHIFT @ if the first gc state bit is set, we overflowed.
cbnz r3, .Lslow_lock @ if we overflow the count go slow path
add r2, r1, #LOCK_WORD_THIN_LOCK_COUNT_ONE @ increment count for real
strex r3, r2, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET] @ strex necessary for read barrier bits
@@ -581,17 +640,17 @@
cbnz r2, .Lslow_unlock @ if either of the top two bits are set, go slow path
ldr r2, [r9, #THREAD_ID_OFFSET]
mov r3, r1 @ copy lock word to check thread id equality
- and r3, #LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED @ zero the read barrier bits
+ and r3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED @ zero the gc bits
eor r3, r3, r2 @ lock_word.ThreadId() ^ self->ThreadId()
uxth r3, r3 @ zero top 16 bits
cbnz r3, .Lslow_unlock @ do lock word and self thread id's match?
mov r3, r1 @ copy lock word to detect transition to unlocked
- and r3, #LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED @ zero the read barrier bits
+ and r3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED @ zero the gc bits
cmp r3, #LOCK_WORD_THIN_LOCK_COUNT_ONE
bpl .Lrecursive_thin_unlock
@ transition to unlocked
mov r3, r1
- and r3, #LOCK_WORD_READ_BARRIER_STATE_MASK @ r3: zero except for the preserved read barrier bits
+ and r3, #LOCK_WORD_GC_STATE_MASK_SHIFTED @ r3: zero except for the preserved gc bits
dmb ish @ full (LoadStore|StoreStore) memory barrier
#ifndef USE_READ_BARRIER
str r3, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
@@ -1212,17 +1271,18 @@
.extern artTestSuspendFromCode
ENTRY art_quick_test_suspend
#ifdef ARM_R4_SUSPEND_FLAG
- ldrh r0, [rSELF, #THREAD_FLAGS_OFFSET]
- mov rSUSPEND, #SUSPEND_CHECK_INTERVAL @ reset rSUSPEND to SUSPEND_CHECK_INTERVAL
- cbnz r0, 1f @ check Thread::Current()->suspend_count_ == 0
- bx lr @ return if suspend_count_ == 0
+ ldrh rSUSPEND, [rSELF, #THREAD_FLAGS_OFFSET]
+ cbnz rSUSPEND, 1f @ check Thread::Current()->suspend_count_ == 0
+ mov rSUSPEND, #SUSPEND_CHECK_INTERVAL @ reset rSUSPEND to SUSPEND_CHECK_INTERVAL
+ bx lr @ return if suspend_count_ == 0
1:
+ mov rSUSPEND, #SUSPEND_CHECK_INTERVAL @ reset rSUSPEND to SUSPEND_CHECK_INTERVAL
#endif
+ SETUP_SAVE_EVERYTHING_CALLEE_SAVE_FRAME r0 @ save everything for GC stack crawl
mov r0, rSELF
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1 @ save callee saves for GC stack crawl
- @ TODO: save FPRs to enable access in the debugger?
- bl artTestSuspendFromCode @ (Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
+ bl artTestSuspendFromCode @ (Thread*)
+ RESTORE_SAVE_EVERYTHING_CALLEE_SAVE_FRAME
+ bx lr
END art_quick_test_suspend
ENTRY art_quick_implicit_suspend
@@ -1772,6 +1832,20 @@
*/
.macro READ_BARRIER_MARK_REG name, reg
ENTRY \name
+ // Null check so that we can load the lock word.
+ cmp \reg, #0
+ beq .Lret_rb_\name
+ // Check lock word for mark bit, if marked return.
+ push {r0}
+ ldr r0, [\reg, MIRROR_OBJECT_LOCK_WORD_OFFSET]
+ and r0, #LOCK_WORD_MARK_BIT_MASK_SHIFTED
+ cbz r0, .Lslow_rb_\name
+ // Restore r0 and return.
+ pop {r0}
+ bx lr
+
+.Lslow_rb_\name:
+ pop {r0}
push {r0-r4, r9, r12, lr} @ save return address and core caller-save registers
.cfi_adjust_cfa_offset 32
.cfi_rel_offset r0, 0
@@ -1831,6 +1905,8 @@
.endif
.endif
pop {r0-r4, r9, r12, pc} @ restore caller-save registers and return
+.Lret_rb_\name:
+ bx lr
END \name
.endm
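
The lock and unlock fast paths above now mask out the combined gc-state bits (read barrier state plus mark bit) rather than only the read barrier state. A hedged C++ sketch of the thin-lock acquire the assembly performs; the constant values are illustrative stand-ins for the LOCK_WORD_* definitions, not authoritative:

    #include <atomic>
    #include <cstdint>

    // Stand-ins for LOCK_WORD_GC_STATE_MASK_SHIFTED and
    // LOCK_WORD_THIN_LOCK_COUNT_ONE (illustrative values).
    constexpr uint32_t kGcStateMaskShifted = 0x30000000u;
    constexpr uint32_t kThinLockCountOne = 1u << 16;

    // Sketch of the unlocked -> thin-locked transition.
    bool TryThinLock(std::atomic<uint32_t>& lock_word, uint32_t thread_id) {
      uint32_t old_word = lock_word.load(std::memory_order_relaxed);
      if ((old_word & ~kGcStateMaskShifted) != 0) {
        return false;  // Already owned: recursive/contended cases go elsewhere.
      }
      // Unlocked apart from the gc bits: install owner id, count 0, gc bits kept.
      return lock_word.compare_exchange_strong(old_word, old_word | thread_id,
                                               std::memory_order_acquire);
    }
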
diff --git a/runtime/arch/arm/quick_method_frame_info_arm.h b/runtime/arch/arm/quick_method_frame_info_arm.h
index 0fb8a63..c474d2e 100644
--- a/runtime/arch/arm/quick_method_frame_info_arm.h
+++ b/runtime/arch/arm/quick_method_frame_info_arm.h
@@ -34,6 +34,9 @@
(1 << art::arm::R1) | (1 << art::arm::R2) | (1 << art::arm::R3);
static constexpr uint32_t kArmCalleeSaveAllSpills =
(1 << art::arm::R4) | (1 << art::arm::R9);
+static constexpr uint32_t kArmCalleeSaveEverythingSpills =
+ (1 << art::arm::R0) | (1 << art::arm::R1) | (1 << art::arm::R2) | (1 << art::arm::R3) |
+ (1 << art::arm::R4) | (1 << art::arm::R9) | (1 << art::arm::R12);
static constexpr uint32_t kArmCalleeSaveFpAlwaysSpills = 0;
static constexpr uint32_t kArmCalleeSaveFpRefSpills = 0;
@@ -47,17 +50,21 @@
(1 << art::arm::S20) | (1 << art::arm::S21) | (1 << art::arm::S22) | (1 << art::arm::S23) |
(1 << art::arm::S24) | (1 << art::arm::S25) | (1 << art::arm::S26) | (1 << art::arm::S27) |
(1 << art::arm::S28) | (1 << art::arm::S29) | (1 << art::arm::S30) | (1 << art::arm::S31);
+static constexpr uint32_t kArmCalleeSaveFpEverythingSpills =
+ kArmCalleeSaveFpArgSpills | kArmCalleeSaveFpAllSpills;
constexpr uint32_t ArmCalleeSaveCoreSpills(Runtime::CalleeSaveType type) {
return kArmCalleeSaveAlwaysSpills | kArmCalleeSaveRefSpills |
(type == Runtime::kRefsAndArgs ? kArmCalleeSaveArgSpills : 0) |
- (type == Runtime::kSaveAll ? kArmCalleeSaveAllSpills : 0);
+ (type == Runtime::kSaveAll ? kArmCalleeSaveAllSpills : 0) |
+ (type == Runtime::kSaveEverything ? kArmCalleeSaveEverythingSpills : 0);
}
constexpr uint32_t ArmCalleeSaveFpSpills(Runtime::CalleeSaveType type) {
return kArmCalleeSaveFpAlwaysSpills | kArmCalleeSaveFpRefSpills |
(type == Runtime::kRefsAndArgs ? kArmCalleeSaveFpArgSpills: 0) |
- (type == Runtime::kSaveAll ? kArmCalleeSaveFpAllSpills : 0);
+ (type == Runtime::kSaveAll ? kArmCalleeSaveFpAllSpills : 0) |
+ (type == Runtime::kSaveEverything ? kArmCalleeSaveFpEverythingSpills : 0);
}
constexpr uint32_t ArmCalleeSaveFrameSize(Runtime::CalleeSaveType type) {
diff --git a/runtime/arch/arm64/asm_support_arm64.h b/runtime/arch/arm64/asm_support_arm64.h
index 989ecc6..68d12e9 100644
--- a/runtime/arch/arm64/asm_support_arm64.h
+++ b/runtime/arch/arm64/asm_support_arm64.h
@@ -22,5 +22,6 @@
#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVE 176
#define FRAME_SIZE_REFS_ONLY_CALLEE_SAVE 96
#define FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE 224
+#define FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE 512
#endif // ART_RUNTIME_ARCH_ARM64_ASM_SUPPORT_ARM64_H_
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index a5be52d..439f8d4 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -316,6 +316,204 @@
.cfi_adjust_cfa_offset -224
.endm
+ /*
+ * Macro that sets up the callee save frame to conform with
+ * Runtime::CreateCalleeSaveMethod(kSaveEverything)
+ */
+.macro SETUP_SAVE_EVERYTHING_CALLEE_SAVE_FRAME
+ sub sp, sp, #512
+ .cfi_adjust_cfa_offset 512
+
+ // Ugly compile-time check, but we only have the preprocessor.
+#if (FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE != 512)
+#error "SAVE_EVERYTHING_CALLEE_SAVE_FRAME(ARM64) size not as expected."
+#endif
+
+ // Save FP registers.
+ stp d0, d1, [sp, #8]
+ stp d2, d3, [sp, #24]
+ stp d4, d5, [sp, #40]
+ stp d6, d7, [sp, #56]
+ stp d8, d9, [sp, #72]
+ stp d10, d11, [sp, #88]
+ stp d12, d13, [sp, #104]
+ stp d14, d15, [sp, #120]
+ stp d16, d17, [sp, #136]
+ stp d18, d19, [sp, #152]
+ stp d20, d21, [sp, #168]
+ stp d22, d23, [sp, #184]
+ stp d24, d25, [sp, #200]
+ stp d26, d27, [sp, #216]
+ stp d28, d29, [sp, #232]
+ stp d30, d31, [sp, #248]
+
+ // Save core registers.
+ str x0, [sp, #264]
+ .cfi_rel_offset x0, 264
+
+ stp x1, x2, [sp, #272]
+ .cfi_rel_offset x1, 272
+ .cfi_rel_offset x2, 280
+
+ stp x3, x4, [sp, #288]
+ .cfi_rel_offset x3, 288
+ .cfi_rel_offset x4, 296
+
+ stp x5, x6, [sp, #304]
+ .cfi_rel_offset x5, 304
+ .cfi_rel_offset x6, 312
+
+ stp x7, x8, [sp, #320]
+ .cfi_rel_offset x7, 320
+ .cfi_rel_offset x8, 328
+
+ stp x9, x10, [sp, #336]
+ .cfi_rel_offset x9, 336
+ .cfi_rel_offset x10, 344
+
+ stp x11, x12, [sp, #352]
+ .cfi_rel_offset x11, 352
+ .cfi_rel_offset x12, 360
+
+ stp x13, x14, [sp, #368]
+ .cfi_rel_offset x13, 368
+ .cfi_rel_offset x14, 376
+
+ stp x15, x16, [sp, #384]
+ .cfi_rel_offset x15, 384
+ .cfi_rel_offset x16, 392
+
+ stp x17, x18, [sp, #400]
+ .cfi_rel_offset x17, 400
+ .cfi_rel_offset x18, 408
+
+ stp x19, x20, [sp, #416]
+ .cfi_rel_offset x19, 416
+ .cfi_rel_offset x20, 424
+
+ stp x21, x22, [sp, #432]
+ .cfi_rel_offset x21, 432
+ .cfi_rel_offset x22, 440
+
+ stp x23, x24, [sp, #448]
+ .cfi_rel_offset x23, 448
+ .cfi_rel_offset x24, 456
+
+ stp x25, x26, [sp, #464]
+ .cfi_rel_offset x25, 464
+ .cfi_rel_offset x26, 472
+
+ stp x27, x28, [sp, #480]
+ .cfi_rel_offset x27, 480
+ .cfi_rel_offset x28, 488
+
+ stp x29, xLR, [sp, #496]
+ .cfi_rel_offset x29, 496
+ .cfi_rel_offset x30, 504
+
+ adrp xIP0, :got:_ZN3art7Runtime9instance_E
+ ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]
+
+ ldr xIP0, [xIP0] // xIP0 = art::Runtime::instance_ (a Runtime*).
+
+ // Load the appropriate callee-save method:
+ // xIP0 = (ArtMethod*) Runtime::instance_->callee_save_methods_[kSaveEverything].
+ ldr xIP0, [xIP0, RUNTIME_SAVE_EVERYTHING_CALLEE_SAVE_FRAME_OFFSET]
+
+ // Store ArtMethod* Runtime::callee_save_methods_[kSaveEverything].
+ str xIP0, [sp]
+ // Place sp in Thread::Current()->top_quick_frame.
+ mov xIP0, sp
+ str xIP0, [xSELF, #THREAD_TOP_QUICK_FRAME_OFFSET]
+.endm
+
+.macro RESTORE_SAVE_EVERYTHING_CALLEE_SAVE_FRAME
+ // Restore FP registers.
+ ldp d0, d1, [sp, #8]
+ ldp d2, d3, [sp, #24]
+ ldp d4, d5, [sp, #40]
+ ldp d6, d7, [sp, #56]
+ ldp d8, d9, [sp, #72]
+ ldp d10, d11, [sp, #88]
+ ldp d12, d13, [sp, #104]
+ ldp d14, d15, [sp, #120]
+ ldp d16, d17, [sp, #136]
+ ldp d18, d19, [sp, #152]
+ ldp d20, d21, [sp, #168]
+ ldp d22, d23, [sp, #184]
+ ldp d24, d25, [sp, #200]
+ ldp d26, d27, [sp, #216]
+ ldp d28, d29, [sp, #232]
+ ldp d30, d31, [sp, #248]
+
+ // Restore core registers.
+ ldr x0, [sp, #264]
+ .cfi_restore x0
+
+ ldp x1, x2, [sp, #272]
+ .cfi_restore x1
+ .cfi_restore x2
+
+ ldp x3, x4, [sp, #288]
+ .cfi_restore x3
+ .cfi_restore x4
+
+ ldp x5, x6, [sp, #304]
+ .cfi_restore x5
+ .cfi_restore x6
+
+ ldp x7, x8, [sp, #320]
+ .cfi_restore x7
+ .cfi_restore x8
+
+ ldp x9, x10, [sp, #336]
+ .cfi_restore x9
+ .cfi_restore x10
+
+ ldp x11, x12, [sp, #352]
+ .cfi_restore x11
+ .cfi_restore x12
+
+ ldp x13, x14, [sp, #368]
+ .cfi_restore x13
+ .cfi_restore x14
+
+ ldp x15, x16, [sp, #384]
+ .cfi_restore x15
+ .cfi_restore x16
+
+ ldp x17, x18, [sp, #400]
+ .cfi_restore x17
+ .cfi_restore x18
+
+ ldp x19, x20, [sp, #416]
+ .cfi_restore x19
+ .cfi_restore x20
+
+ ldp x21, x22, [sp, #432]
+ .cfi_restore x21
+ .cfi_restore x22
+
+ ldp x23, x24, [sp, #448]
+ .cfi_restore x23
+ .cfi_restore x24
+
+ ldp x25, x26, [sp, #464]
+ .cfi_restore x25
+ .cfi_restore x26
+
+ ldp x27, x28, [sp, #480]
+ .cfi_restore x27
+ .cfi_restore x28
+
+ ldp x29, xLR, [sp, #496]
+ .cfi_restore x29
+ .cfi_restore x30
+
+ add sp, sp, #512
+ .cfi_adjust_cfa_offset -512
+.endm
+
.macro RETURN_IF_RESULT_IS_ZERO
cbnz x0, 1f // result non-zero branch over
ret // return
@@ -1090,7 +1288,7 @@
ldr w2, [xSELF, #THREAD_ID_OFFSET] // TODO: Can the thread ID really change during the loop?
ldxr w1, [x4]
mov x3, x1
- and w3, w3, #LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED // zero the read barrier bits
+ and w3, w3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED // zero the gc bits
cbnz w3, .Lnot_unlocked // already thin locked
// unlocked case - x1: original lock word that's zero except for the read barrier bits.
orr x2, x1, x2 // x2 holds thread id with count of 0 with preserved read barrier bits
@@ -1106,9 +1304,9 @@
cbnz w2, .Lslow_lock // lock word and self thread id's match -> recursive lock
// else contention, go to slow path
mov x3, x1 // copy the lock word to check count overflow.
- and w3, w3, #LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED // zero the read barrier bits.
+ and w3, w3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED // zero the gc bits.
add w2, w3, #LOCK_WORD_THIN_LOCK_COUNT_ONE // increment count in lock word placing in w2 to check overflow
- lsr w3, w2, LOCK_WORD_READ_BARRIER_STATE_SHIFT // if either of the upper two bits (28-29) are set, we overflowed.
+ lsr w3, w2, #LOCK_WORD_GC_STATE_SHIFT // if the first gc state bit is set, we overflowed.
cbnz w3, .Lslow_lock // if we overflow the count go slow path
add w2, w1, #LOCK_WORD_THIN_LOCK_COUNT_ONE // increment count for real
stxr w3, w2, [x4]
@@ -1152,17 +1350,17 @@
cbnz w2, .Lslow_unlock // if either of the top two bits are set, go slow path
ldr w2, [xSELF, #THREAD_ID_OFFSET]
mov x3, x1 // copy lock word to check thread id equality
- and w3, w3, #LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED // zero the read barrier bits
+ and w3, w3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED // zero the gc bits
eor w3, w3, w2 // lock_word.ThreadId() ^ self->ThreadId()
uxth w3, w3 // zero top 16 bits
cbnz w3, .Lslow_unlock // do lock word and self thread id's match?
mov x3, x1 // copy lock word to detect transition to unlocked
- and w3, w3, #LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED // zero the read barrier bits
+ and w3, w3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED // zero the gc bits
cmp w3, #LOCK_WORD_THIN_LOCK_COUNT_ONE
bpl .Lrecursive_thin_unlock
// transition to unlocked
mov x3, x1
- and w3, w3, #LOCK_WORD_READ_BARRIER_STATE_MASK // w3: zero except for the preserved read barrier bits
+ and w3, w3, #LOCK_WORD_GC_STATE_MASK_SHIFTED // w3: zero except for the preserved gc bits
dmb ish // full (LoadStore|StoreStore) memory barrier
#ifndef USE_READ_BARRIER
str w3, [x4]
@@ -1590,7 +1788,20 @@
ONE_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
// Generate the allocation entrypoints for each allocator.
-GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
+GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_REGION_TLAB_ALLOCATORS
+// Comment out allocators that have arm64 specific asm.
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB) implemented in asm
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_region_tlab, RegionTLAB) implemented in asm
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)
// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc).
ENTRY art_quick_alloc_object_rosalloc
@@ -1697,6 +1908,71 @@
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
END art_quick_alloc_object_rosalloc
+
+// The common fast path code for art_quick_alloc_array_region_tlab.
+.macro ALLOC_ARRAY_TLAB_FAST_PATH slowPathLabel, xClass, wClass, xCount, wCount, xTemp0, wTemp0, xTemp1, wTemp1, xTemp2, wTemp2
+ // Check null class
+ cbz \wClass, \slowPathLabel
+ ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED \slowPathLabel, \xClass, \wClass, \xCount, \wCount, \xTemp0, \wTemp0, \xTemp1, \wTemp1, \xTemp2, \wTemp2
+.endm
+
+// The common fast path code for art_quick_alloc_array_region_tlab, for use once the class is resolved.
+.macro ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED slowPathLabel, xClass, wClass, xCount, wCount, xTemp0, wTemp0, xTemp1, wTemp1, xTemp2, wTemp2
+ // Array classes are never finalizable or uninitialized, so there is no need to check.
+ ldr \wTemp0, [\xClass, #MIRROR_CLASS_COMPONENT_TYPE_OFFSET] // Load component type
+ UNPOISON_HEAP_REF \wTemp0
+ ldr \wTemp0, [\xTemp0, #MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET]
+ lsr \xTemp0, \xTemp0, #PRIMITIVE_TYPE_SIZE_SHIFT_SHIFT // Component size shift is in high 16
+ // bits.
+ // xCount holds a 32-bit value; it cannot overflow.
+ lsl \xTemp1, \xCount, \xTemp0 // Calculate data size
+ // Add array data offset and alignment.
+ add \xTemp1, \xTemp1, #(MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
+#if MIRROR_LONG_ARRAY_DATA_OFFSET != MIRROR_INT_ARRAY_DATA_OFFSET + 4
+#error Long array data offset must be 4 greater than int array data offset.
+#endif
+
+ add \xTemp0, \xTemp0, #1 // Add 4 to the length only if the
+ // component size shift is 3
+ // (for 64 bit alignment).
+ and \xTemp0, \xTemp0, #4
+ add \xTemp1, \xTemp1, \xTemp0
+ and \xTemp1, \xTemp1, #OBJECT_ALIGNMENT_MASK_TOGGLED // Round up the object size by the
+ // object alignment: (size + 7) & ~7.
+ // The add of 7 is done above.
+
+ cmp \xTemp1, #MIN_LARGE_OBJECT_THRESHOLD // Possibly a large object, go slow
+ bhs \slowPathLabel // path.
+
+ ldr \xTemp0, [xSELF, #THREAD_LOCAL_POS_OFFSET] // Check tlab for space, note that
+ // we use (end - begin) to handle
+ // negative size arrays. It is
+ // assumed that a negative size will
+ // always be greater unsigned than
+ // region size.
+ ldr \xTemp2, [xSELF, #THREAD_LOCAL_END_OFFSET]
+ sub \xTemp2, \xTemp2, \xTemp0
+ cmp \xTemp1, \xTemp2
+ bhi \slowPathLabel
+
+ // "Point of no slow path". Won't go to the slow path from here on. OK to clobber x0 and x1.
+ // Move old thread_local_pos to x0
+ // for the return value.
+ mov x0, \xTemp0
+ add \xTemp0, \xTemp0, \xTemp1
+ str \xTemp0, [xSELF, #THREAD_LOCAL_POS_OFFSET] // Store new thread_local_pos.
+ ldr \xTemp0, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET] // Increment thread_local_objects.
+ add \xTemp0, \xTemp0, #1
+ str \xTemp0, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET]
+ POISON_HEAP_REF \wClass
+ str \wClass, [x0, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer.
+ str \wCount, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // Store the array length.
+ // Fence.
+ dmb ishst
+ ret
+.endm
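
The resolved-array fast path above is a bump-pointer allocation: size the array from the component shift, round to object alignment, and carve it out of the thread-local buffer. A C++ sketch of the arithmetic, with struct and parameter names as illustrative stand-ins for the THREAD_LOCAL_* and MIRROR_* offsets:

    #include <cstddef>
    #include <cstdint>

    struct TlabSketch {  // Stand-in for the thread's TLAB fields.
      uint8_t* pos;
      uint8_t* end;
      size_t objects;
    };

    // Returns the new object's address, or nullptr for the slow path.
    void* AllocArrayFastPath(TlabSketch* tlab, uint32_t count,
                             size_t component_shift, size_t data_offset,
                             size_t align_mask, size_t large_threshold) {
      size_t size = (size_t{count} << component_shift) + data_offset + align_mask;
      size &= ~align_mask;  // Round up to object alignment.
      if (size >= large_threshold) return nullptr;  // Large object: slow path.
      // (end - pos) keeps negative/huge sizes out: they compare greater unsigned.
      if (size > static_cast<size_t>(tlab->end - tlab->pos)) return nullptr;
      uint8_t* result = tlab->pos;  // Bump the pointer; the caller stores the
      tlab->pos += size;            // class pointer and length, then fences.
      ++tlab->objects;
      return result;
    }
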
+
// The common fast path code for art_quick_alloc_object_tlab and art_quick_alloc_object_region_tlab.
//
// x0: type_idx/return value, x1: ArtMethod*, x2: Class*, xSELF(x19): Thread::Current
@@ -1704,8 +1980,11 @@
// Need to preserve x0 and x1 to the slow path.
.macro ALLOC_OBJECT_TLAB_FAST_PATH slowPathLabel
cbz x2, \slowPathLabel // Check null class
- // Check class status.
- ldr w3, [x2, #MIRROR_CLASS_STATUS_OFFSET]
+ ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED \slowPathLabel
+.endm
+
+.macro ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED slowPathLabel
+ ldr w3, [x2, #MIRROR_CLASS_STATUS_OFFSET] // Check class status.
cmp x3, #MIRROR_CLASS_STATUS_INITIALIZED
bne \slowPathLabel
// Add a fake dependence from the
@@ -1718,6 +1997,10 @@
// a load-acquire for the status).
eor x3, x3, x3
add x2, x2, x3
+ ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED \slowPathLabel
+.endm
+
+.macro ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED slowPathLabel
// Check access flags has
// kAccClassIsFinalizable.
ldr w3, [x2, #MIRROR_CLASS_ACCESS_FLAGS_OFFSET]
@@ -1779,24 +2062,37 @@
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
END art_quick_alloc_object_tlab
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB)
-ENTRY art_quick_alloc_object_region_tlab
+// The common code for art_quick_alloc_object_*region_tlab
+.macro GENERATE_ALLOC_OBJECT_REGION_TLAB name, entrypoint, fast_path, is_resolved
+ENTRY \name
// Fast path region tlab allocation.
- // x0: type_idx/return value, x1: ArtMethod*, xSELF(x19): Thread::Current
+ // x0: type_idx/resolved class/return value, x1: ArtMethod*, xSELF(x19): Thread::Current
+ // If is_resolved is 1 then x0 is the resolved type, otherwise it is the index.
// x2-x7: free.
#if !defined(USE_READ_BARRIER)
mvn x0, xzr // Read barrier must be enabled here.
ret // Return -1.
#endif
+.if \is_resolved
+ mov x2, x0 // The class is already in x0.
+.else
ldr x2, [x1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64] // Load dex cache resolved types array
// Load the class (x2)
ldr w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
- // Read barrier for class load.
+.endif
+ // Most common case: GC is not marking.
ldr w3, [xSELF, #THREAD_IS_GC_MARKING_OFFSET]
- cbnz x3, .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path
-.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit:
- ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_region_tlab_slow_path
-.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path:
+ cbnz x3, .Lmarking\name
+.Ldo_allocation\name:
+ \fast_path .Lslow_path\name
+.Lmarking\name:
+ // GC is marking, check the lock word of the class for the mark bit.
+ // If the class is null, go slow path. The check is required to read the lock word.
+ cbz w2, .Lslow_path\name
+ // Class is not null, check mark bit in lock word.
+ ldr w3, [x2, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
+ // If the bit is not zero, do the allocation.
+ tbnz w3, #LOCK_WORD_MARK_BIT_SHIFT, .Ldo_allocation\name
// The read barrier slow path. Mark
// the class.
stp x0, x1, [sp, #-32]! // Save registers (x0, x1, lr).
@@ -1807,28 +2103,90 @@
ldp x0, x1, [sp, #0] // Restore registers.
ldr xLR, [sp, #16]
add sp, sp, #32
- b .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit
-.Lart_quick_alloc_object_region_tlab_slow_path:
+ b .Ldo_allocation\name
+.Lslow_path\name:
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // Save callee saves in case of GC.
mov x2, xSELF // Pass Thread::Current.
- bl artAllocObjectFromCodeRegionTLAB // (uint32_t type_idx, Method* method, Thread*)
+ bl \entrypoint // (uint32_t type_idx, Method* method, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END art_quick_alloc_object_region_tlab
+END \name
+.endm
+
+GENERATE_ALLOC_OBJECT_REGION_TLAB art_quick_alloc_object_region_tlab, artAllocObjectFromCodeRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH, 0
+GENERATE_ALLOC_OBJECT_REGION_TLAB art_quick_alloc_object_resolved_region_tlab, artAllocObjectFromCodeResolvedRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED, 1
+GENERATE_ALLOC_OBJECT_REGION_TLAB art_quick_alloc_object_initialized_region_tlab, artAllocObjectFromCodeInitializedRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED, 1
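
All three generated object entry points share the marking check emitted above: when the concurrent copying GC is marking, the class loaded from the dex cache may be a from-space pointer, so its lock-word mark bit is consulted before allocating. A sketch of that dispatch; the types and callbacks are illustrative, not the runtime's API:

    #include <cstdint>

    struct ClassSketch { uint32_t lock_word; };  // Illustrative stand-in.
    constexpr int kMarkBitShift = 29;  // Stand-in for LOCK_WORD_MARK_BIT_SHIFT.

    template <typename FastPath, typename Mark, typename SlowPath>
    void* AllocObjectRegionTlab(ClassSketch* klass, bool gc_is_marking,
                                FastPath fast_path, Mark mark, SlowPath slow_path) {
      if (gc_is_marking) {
        if (klass == nullptr) {
          return slow_path();  // Null check required before touching the lock word.
        }
        if (((klass->lock_word >> kMarkBitShift) & 1u) == 0) {
          klass = mark(klass);  // Read barrier: obtain the to-space class.
        }
      }
      return fast_path(klass);  // Common case: not marking, or already marked.
    }
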
+
+// The common code for art_quick_alloc_array_*region_tlab
+.macro GENERATE_ALLOC_ARRAY_REGION_TLAB name, entrypoint, fast_path, is_resolved
+ENTRY \name
+ // Fast path array allocation for region tlab allocation.
+ // x0: uint32_t type_idx
+ // x1: int32_t component_count
+ // x2: ArtMethod* method
+ // x3-x7: free.
+#if !defined(USE_READ_BARRIER)
+ mvn x0, xzr // Read barrier must be enabled here.
+ ret // Return -1.
+#endif
+.if \is_resolved
+ mov x3, x0
+ // If already resolved, the class is stored in x0.
+.else
+ ldr x3, [x2, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64] // Load dex cache resolved types array
+ // Load the class (x3)
+ ldr w3, [x3, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
+.endif
+ // Most common case: GC is not marking.
+ ldr w4, [xSELF, #THREAD_IS_GC_MARKING_OFFSET]
+ cbnz x4, .Lmarking\name
+.Ldo_allocation\name:
+ \fast_path .Lslow_path\name, x3, w3, x1, w1, x4, w4, x5, w5, x6, w6
+.Lmarking\name:
+ // GC is marking, check the lock word of the class for the mark bit.
+ // If the class is null, go slow path. The check is required to read the lock word.
+ cbz w3, .Lslow_path\name
+ // Class is not null, check mark bit in lock word.
+ ldr w4, [x3, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
+ // If the bit is not zero, do the allocation.
+ tbnz w4, #LOCK_WORD_MARK_BIT_SHIFT, .Ldo_allocation\name
+ // The read barrier slow path. Mark
+ // the class.
+ stp x0, x1, [sp, #-32]! // Save registers (x0, x1, x2, lr).
+ stp x2, xLR, [sp, #16]
+ mov x0, x3 // Pass the class as the first param.
+ bl artReadBarrierMark
+ mov x3, x0 // Get the (marked) class back.
+ ldp x2, xLR, [sp, #16]
+ ldp x0, x1, [sp], #32 // Restore registers.
+ b .Ldo_allocation\name
+.Lslow_path\name:
+ // x0: uint32_t type_idx / mirror::Class* klass (if resolved)
+ // x1: int32_t component_count
+ // x2: ArtMethod* method
+ // x3: Thread* self
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
+ mov x3, xSELF // pass Thread::Current
+ bl \entrypoint
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+END \name
+.endm
+
+GENERATE_ALLOC_ARRAY_REGION_TLAB art_quick_alloc_array_region_tlab, artAllocArrayFromCodeRegionTLAB, ALLOC_ARRAY_TLAB_FAST_PATH, 0
+// TODO: art_quick_alloc_array_resolved_region_tlab seems to not get called. Investigate compiler.
+GENERATE_ALLOC_ARRAY_REGION_TLAB art_quick_alloc_array_resolved_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED, 1
/*
* Called by managed code when the thread has been asked to suspend.
*/
.extern artTestSuspendFromCode
ENTRY art_quick_test_suspend
- ldrh w0, [xSELF, #THREAD_FLAGS_OFFSET] // get xSELF->state_and_flags.as_struct.flags
- cbnz w0, .Lneed_suspend // check flags == 0
- ret // return if flags == 0
-.Lneed_suspend:
+ SETUP_SAVE_EVERYTHING_CALLEE_SAVE_FRAME // save callee saves for stack crawl
mov x0, xSELF
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves for stack crawl
bl artTestSuspendFromCode // (Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
+ RESTORE_SAVE_EVERYTHING_CALLEE_SAVE_FRAME
+ ret
END art_quick_test_suspend
ENTRY art_quick_implicit_suspend
@@ -2265,6 +2623,8 @@
*/
.macro READ_BARRIER_MARK_REG name, wreg, xreg
ENTRY \name
+ // Reference is null, no work to do at all.
+ cbz \wreg, .Lret_rb_\name
/*
* Allocate 46 stack slots * 8 = 368 bytes:
* - 20 slots for core registers X0-X19
@@ -2272,6 +2632,11 @@
* - 1 slot for return address register XLR
* - 1 padding slot for 16-byte stack alignment
*/
+ // Use wIP0 as temp and check the mark bit of the reference. wIP0 is not used by the compiler.
+ ldr wIP0, [\xreg, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
+ tbz wIP0, #LOCK_WORD_MARK_BIT_SHIFT, .Lslow_path_rb_\name
+ ret
+.Lslow_path_rb_\name:
// Save all potentially live caller-save core registers.
stp x0, x1, [sp, #-368]!
.cfi_adjust_cfa_offset 368
@@ -2360,6 +2725,7 @@
.cfi_restore x30
add sp, sp, #368
.cfi_adjust_cfa_offset -368
+.Lret_rb_\name:
ret
END \name
.endm
diff --git a/runtime/arch/arm64/quick_method_frame_info_arm64.h b/runtime/arch/arm64/quick_method_frame_info_arm64.h
index b3d250b..188e46e 100644
--- a/runtime/arch/arm64/quick_method_frame_info_arm64.h
+++ b/runtime/arch/arm64/quick_method_frame_info_arm64.h
@@ -29,7 +29,7 @@
static constexpr uint32_t kArm64CalleeSaveAlwaysSpills =
// Note: ArtMethod::GetReturnPcOffsetInBytes() relies on the assumption that
// LR is always saved on the top of the frame for all targets.
- // That is, lr = *(sp + framesize - pointsize).
+ // That is, lr = *(sp + framesize - pointer_size).
(1 << art::arm64::LR);
// Callee saved registers
static constexpr uint32_t kArm64CalleeSaveRefSpills =
@@ -44,6 +44,14 @@
(1 << art::arm64::X7);
static constexpr uint32_t kArm64CalleeSaveAllSpills =
(1 << art::arm64::X19);
+static constexpr uint32_t kArm64CalleeSaveEverythingSpills =
+ (1 << art::arm64::X0) | (1 << art::arm64::X1) | (1 << art::arm64::X2) |
+ (1 << art::arm64::X3) | (1 << art::arm64::X4) | (1 << art::arm64::X5) |
+ (1 << art::arm64::X6) | (1 << art::arm64::X7) | (1 << art::arm64::X8) |
+ (1 << art::arm64::X9) | (1 << art::arm64::X10) | (1 << art::arm64::X11) |
+ (1 << art::arm64::X12) | (1 << art::arm64::X13) | (1 << art::arm64::X14) |
+ (1 << art::arm64::X15) | (1 << art::arm64::X16) | (1 << art::arm64::X17) |
+ (1 << art::arm64::X18) | (1 << art::arm64::X19);
static constexpr uint32_t kArm64CalleeSaveFpAlwaysSpills = 0;
static constexpr uint32_t kArm64CalleeSaveFpRefSpills = 0;
@@ -55,17 +63,31 @@
(1 << art::arm64::D8) | (1 << art::arm64::D9) | (1 << art::arm64::D10) |
(1 << art::arm64::D11) | (1 << art::arm64::D12) | (1 << art::arm64::D13) |
(1 << art::arm64::D14) | (1 << art::arm64::D15);
+static constexpr uint32_t kArm64CalleeSaveFpEverythingSpills =
+ (1 << art::arm64::D0) | (1 << art::arm64::D1) | (1 << art::arm64::D2) |
+ (1 << art::arm64::D3) | (1 << art::arm64::D4) | (1 << art::arm64::D5) |
+ (1 << art::arm64::D6) | (1 << art::arm64::D7) | (1 << art::arm64::D8) |
+ (1 << art::arm64::D9) | (1 << art::arm64::D10) | (1 << art::arm64::D11) |
+ (1 << art::arm64::D12) | (1 << art::arm64::D13) | (1 << art::arm64::D14) |
+ (1 << art::arm64::D15) | (1 << art::arm64::D16) | (1 << art::arm64::D17) |
+ (1 << art::arm64::D18) | (1 << art::arm64::D19) | (1 << art::arm64::D20) |
+ (1 << art::arm64::D21) | (1 << art::arm64::D22) | (1 << art::arm64::D23) |
+ (1 << art::arm64::D24) | (1 << art::arm64::D25) | (1 << art::arm64::D26) |
+ (1 << art::arm64::D27) | (1 << art::arm64::D28) | (1 << art::arm64::D29) |
+ (1 << art::arm64::D30) | (1 << art::arm64::D31);
constexpr uint32_t Arm64CalleeSaveCoreSpills(Runtime::CalleeSaveType type) {
return kArm64CalleeSaveAlwaysSpills | kArm64CalleeSaveRefSpills |
(type == Runtime::kRefsAndArgs ? kArm64CalleeSaveArgSpills : 0) |
- (type == Runtime::kSaveAll ? kArm64CalleeSaveAllSpills : 0);
+ (type == Runtime::kSaveAll ? kArm64CalleeSaveAllSpills : 0) |
+ (type == Runtime::kSaveEverything ? kArm64CalleeSaveEverythingSpills : 0);
}
constexpr uint32_t Arm64CalleeSaveFpSpills(Runtime::CalleeSaveType type) {
return kArm64CalleeSaveFpAlwaysSpills | kArm64CalleeSaveFpRefSpills |
(type == Runtime::kRefsAndArgs ? kArm64CalleeSaveFpArgSpills: 0) |
- (type == Runtime::kSaveAll ? kArm64CalleeSaveFpAllSpills : 0);
+ (type == Runtime::kSaveAll ? kArm64CalleeSaveFpAllSpills : 0) |
+ (type == Runtime::kSaveEverything ? kArm64CalleeSaveFpEverythingSpills : 0);
}
constexpr uint32_t Arm64CalleeSaveFrameSize(Runtime::CalleeSaveType type) {
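
The frame-size constants in asm_support follow from these spill masks: one 8-byte slot per set bit, plus a slot for the ArtMethod*, rounded to the 16-byte stack alignment. A sketch of the arithmetic (helper names hypothetical):

    #include <cstdint>

    constexpr uint32_t PopCount(uint32_t x) {
      return x == 0 ? 0 : (x & 1u) + PopCount(x >> 1);
    }

    // One 8-byte slot per spilled register, plus the Method* slot,
    // rounded up to 16-byte stack alignment.
    constexpr uint32_t FrameSize(uint32_t core_spills, uint32_t fp_spills) {
      return ((PopCount(core_spills) + PopCount(fp_spills) + 1) * 8 + 15) & ~15u;
    }

    // For arm64 kSaveEverything: 31 core registers (LR, the reference spills,
    // and X0-X19) plus all 32 D registers gives 64 slots, i.e. 512 bytes,
    // matching FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE above.
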
diff --git a/runtime/arch/mips/asm_support_mips.h b/runtime/arch/mips/asm_support_mips.h
index 453056d..2ef45f5 100644
--- a/runtime/arch/mips/asm_support_mips.h
+++ b/runtime/arch/mips/asm_support_mips.h
@@ -22,5 +22,6 @@
#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVE 96
#define FRAME_SIZE_REFS_ONLY_CALLEE_SAVE 48
#define FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE 80
+#define FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE 256
#endif // ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_H_
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index c1b8044..9b24128 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -277,6 +277,203 @@
.endm
/*
+ * Macro that sets up the callee save frame to conform with
+ * Runtime::CreateCalleeSaveMethod(kSaveEverything).
+ * Callee-save: $at, $v0-$v1, $a0-$a3, $t0-$t7, $s0-$s7, $t8-$t9, $gp, $fp, $ra, $f0-$f31;
+ * 28(GPR) + 32(FPR) + 3 words for padding and 1 word for Method*.
+ * Clobbers $t0 and $t1.
+ * Allocates ARG_SLOT_SIZE bytes at the bottom of the stack for arg slots.
+ * Reserves FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE + ARG_SLOT_SIZE bytes on the stack.
+ * This macro sets up $gp; entrypoints using it should start with ENTRY_NO_GP.
+ */
+.macro SETUP_SAVE_EVERYTHING_CALLEE_SAVE_FRAME
+ addiu $sp, $sp, -256
+ .cfi_adjust_cfa_offset 256
+
+ // Ugly compile-time check, but we only have the preprocessor.
+#if (FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE != 256)
+#error "SAVE_EVERYTHING_CALLEE_SAVE_FRAME(MIPS) size not as expected."
+#endif
+
+ sw $ra, 252($sp)
+ .cfi_rel_offset 31, 252
+ sw $fp, 248($sp)
+ .cfi_rel_offset 30, 248
+ sw $gp, 244($sp)
+ .cfi_rel_offset 28, 244
+ sw $t9, 240($sp)
+ .cfi_rel_offset 25, 240
+ sw $t8, 236($sp)
+ .cfi_rel_offset 24, 236
+ sw $s7, 232($sp)
+ .cfi_rel_offset 23, 232
+ sw $s6, 228($sp)
+ .cfi_rel_offset 22, 228
+ sw $s5, 224($sp)
+ .cfi_rel_offset 21, 224
+ sw $s4, 220($sp)
+ .cfi_rel_offset 20, 220
+ sw $s3, 216($sp)
+ .cfi_rel_offset 19, 216
+ sw $s2, 212($sp)
+ .cfi_rel_offset 18, 212
+ sw $s1, 208($sp)
+ .cfi_rel_offset 17, 208
+ sw $s0, 204($sp)
+ .cfi_rel_offset 16, 204
+ sw $t7, 200($sp)
+ .cfi_rel_offset 15, 200
+ sw $t6, 196($sp)
+ .cfi_rel_offset 14, 196
+ sw $t5, 192($sp)
+ .cfi_rel_offset 13, 192
+ sw $t4, 188($sp)
+ .cfi_rel_offset 12, 188
+ sw $t3, 184($sp)
+ .cfi_rel_offset 11, 184
+ sw $t2, 180($sp)
+ .cfi_rel_offset 10, 180
+ sw $t1, 176($sp)
+ .cfi_rel_offset 9, 176
+ sw $t0, 172($sp)
+ .cfi_rel_offset 8, 172
+ sw $a3, 168($sp)
+ .cfi_rel_offset 7, 168
+ sw $a2, 164($sp)
+ .cfi_rel_offset 6, 164
+ sw $a1, 160($sp)
+ .cfi_rel_offset 5, 160
+ sw $a0, 156($sp)
+ .cfi_rel_offset 4, 156
+ sw $v1, 152($sp)
+ .cfi_rel_offset 3, 152
+ sw $v0, 148($sp)
+ .cfi_rel_offset 2, 148
+
+ // Set up $gp, clobbering $ra and using the branch delay slot for a useful instruction.
+ bal 1f
+ .set push
+ .set noat
+ sw $at, 144($sp)
+ .cfi_rel_offset 1, 144
+ .set pop
+1:
+ .cpload $ra
+
+ SDu $f30, $f31, 136, $sp, $t1
+ SDu $f28, $f29, 128, $sp, $t1
+ SDu $f26, $f27, 120, $sp, $t1
+ SDu $f24, $f25, 112, $sp, $t1
+ SDu $f22, $f23, 104, $sp, $t1
+ SDu $f20, $f21, 96, $sp, $t1
+ SDu $f18, $f19, 88, $sp, $t1
+ SDu $f16, $f17, 80, $sp, $t1
+ SDu $f14, $f15, 72, $sp, $t1
+ SDu $f12, $f13, 64, $sp, $t1
+ SDu $f10, $f11, 56, $sp, $t1
+ SDu $f8, $f9, 48, $sp, $t1
+ SDu $f6, $f7, 40, $sp, $t1
+ SDu $f4, $f5, 32, $sp, $t1
+ SDu $f2, $f3, 24, $sp, $t1
+ SDu $f0, $f1, 16, $sp, $t1
+
+ # 3 words padding and 1 word for holding Method*
+
+ lw $t0, %got(_ZN3art7Runtime9instance_E)($gp)
+ lw $t0, 0($t0)
+ lw $t0, RUNTIME_SAVE_EVERYTHING_CALLEE_SAVE_FRAME_OFFSET($t0)
+ sw $t0, 0($sp) # Place Method* at bottom of stack.
+ sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
+ addiu $sp, $sp, -ARG_SLOT_SIZE # reserve argument slots on the stack
+ .cfi_adjust_cfa_offset ARG_SLOT_SIZE
+.endm
+
+.macro RESTORE_SAVE_EVERYTHING_CALLEE_SAVE_FRAME
+ addiu $sp, $sp, ARG_SLOT_SIZE # remove argument slots on the stack
+ .cfi_adjust_cfa_offset -ARG_SLOT_SIZE
+
+ LDu $f30, $f31, 136, $sp, $t1
+ LDu $f28, $f29, 128, $sp, $t1
+ LDu $f26, $f27, 120, $sp, $t1
+ LDu $f24, $f25, 112, $sp, $t1
+ LDu $f22, $f23, 104, $sp, $t1
+ LDu $f20, $f21, 96, $sp, $t1
+ LDu $f18, $f19, 88, $sp, $t1
+ LDu $f16, $f17, 80, $sp, $t1
+ LDu $f14, $f15, 72, $sp, $t1
+ LDu $f12, $f13, 64, $sp, $t1
+ LDu $f10, $f11, 56, $sp, $t1
+ LDu $f8, $f9, 48, $sp, $t1
+ LDu $f6, $f7, 40, $sp, $t1
+ LDu $f4, $f5, 32, $sp, $t1
+ LDu $f2, $f3, 24, $sp, $t1
+ LDu $f0, $f1, 16, $sp, $t1
+
+ lw $ra, 252($sp)
+ .cfi_restore 31
+ lw $fp, 248($sp)
+ .cfi_restore 30
+ lw $gp, 244($sp)
+ .cfi_restore 28
+ lw $t9, 240($sp)
+ .cfi_restore 25
+ lw $t8, 236($sp)
+ .cfi_restore 24
+ lw $s7, 232($sp)
+ .cfi_restore 23
+ lw $s6, 228($sp)
+ .cfi_restore 22
+ lw $s5, 224($sp)
+ .cfi_restore 21
+ lw $s4, 220($sp)
+ .cfi_restore 20
+ lw $s3, 216($sp)
+ .cfi_restore 19
+ lw $s2, 212($sp)
+ .cfi_restore 18
+ lw $s1, 208($sp)
+ .cfi_restore 17
+ lw $s0, 204($sp)
+ .cfi_restore 16
+ lw $t7, 200($sp)
+ .cfi_restore 15
+ lw $t6, 196($sp)
+ .cfi_restore 14
+ lw $t5, 192($sp)
+ .cfi_restore 13
+ lw $t4, 188($sp)
+ .cfi_restore 12
+ lw $t3, 184($sp)
+ .cfi_restore 11
+ lw $t2, 180($sp)
+ .cfi_restore 10
+ lw $t1, 176($sp)
+ .cfi_restore 9
+ lw $t0, 172($sp)
+ .cfi_restore 8
+ lw $a3, 168($sp)
+ .cfi_restore 7
+ lw $a2, 164($sp)
+ .cfi_restore 6
+ lw $a1, 160($sp)
+ .cfi_restore 5
+ lw $a0, 156($sp)
+ .cfi_restore 4
+ lw $v1, 152($sp)
+ .cfi_restore 3
+ lw $v0, 148($sp)
+ .cfi_restore 2
+ .set push
+ .set noat
+ lw $at, 144($sp)
+ .cfi_restore 1
+ .set pop
+
+ addiu $sp, $sp, 256 # pop frame
+ .cfi_adjust_cfa_offset -256
+.endm
+
+ /*
* Macro that set calls through to artDeliverPendingExceptionFromCode, where the pending
* exception is Thread::Current()->exception_
*/
@@ -1652,18 +1849,20 @@
* Called by managed code when the value in rSUSPEND has been decremented to 0.
*/
.extern artTestSuspendFromCode
-ENTRY art_quick_test_suspend
- lh $a0, THREAD_FLAGS_OFFSET(rSELF)
- bnez $a0, 1f
+ENTRY_NO_GP art_quick_test_suspend
+ lh rSUSPEND, THREAD_FLAGS_OFFSET(rSELF)
+ bnez rSUSPEND, 1f
addiu rSUSPEND, $zero, SUSPEND_CHECK_INTERVAL # reset rSUSPEND to SUSPEND_CHECK_INTERVAL
jalr $zero, $ra
nop
1:
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves for stack crawl
+ SETUP_SAVE_EVERYTHING_CALLEE_SAVE_FRAME # save everything for stack crawl
la $t9, artTestSuspendFromCode
- jalr $t9 # (Thread*)
+ jalr $t9 # (Thread*)
move $a0, rSELF
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
+ RESTORE_SAVE_EVERYTHING_CALLEE_SAVE_FRAME
+ jalr $zero, $ra
+ nop
END art_quick_test_suspend
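In C++-flavored pseudocode, the rewritten entrypoint does the following; Thread,
SaveEverythingScope and kSuspendCheckInterval are minimal stand-ins for the
assembly above, and only artTestSuspendFromCode names the real runtime call:

    struct Thread { int flags; int suspend_countdown; };
    constexpr int kSuspendCheckInterval = 96;           // illustrative value

    void artTestSuspendFromCode(Thread* self);          // slow path (declared only)

    struct SaveEverythingScope {  // models SETUP/RESTORE_SAVE_EVERYTHING_CALLEE_SAVE_FRAME
      explicit SaveEverythingScope(Thread*) { /* spill every register */ }
      ~SaveEverythingScope() { /* reload every register */ }
    };

    void QuickTestSuspend(Thread* self) {
      if (self->flags == 0) {                           // lh + bnez fast path
        self->suspend_countdown = kSuspendCheckInterval;
        return;
      }
      SaveEverythingScope frame(self);                  // full frame for the stack crawl
      artTestSuspendFromCode(self);                     // may suspend this thread
    }

Switching from the refs-only frame to kSaveEverything means a stack crawl from a
suspend point can observe every register's value, not just the reference-holding
callee saves.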
/*
diff --git a/runtime/arch/mips/quick_method_frame_info_mips.h b/runtime/arch/mips/quick_method_frame_info_mips.h
index 7b0623b..170513d 100644
--- a/runtime/arch/mips/quick_method_frame_info_mips.h
+++ b/runtime/arch/mips/quick_method_frame_info_mips.h
@@ -34,6 +34,12 @@
(1 << art::mips::A1) | (1 << art::mips::A2) | (1 << art::mips::A3);
static constexpr uint32_t kMipsCalleeSaveAllSpills =
(1 << art::mips::S0) | (1 << art::mips::S1);
+static constexpr uint32_t kMipsCalleeSaveEverythingSpills =
+ (1 << art::mips::AT) | (1 << art::mips::V0) | (1 << art::mips::V1) |
+ (1 << art::mips::A0) | (1 << art::mips::A1) | (1 << art::mips::A2) | (1 << art::mips::A3) |
+ (1 << art::mips::T0) | (1 << art::mips::T1) | (1 << art::mips::T2) | (1 << art::mips::T3) |
+ (1 << art::mips::T4) | (1 << art::mips::T5) | (1 << art::mips::T6) | (1 << art::mips::T7) |
+ (1 << art::mips::S0) | (1 << art::mips::S1) | (1 << art::mips::T8) | (1 << art::mips::T9);
static constexpr uint32_t kMipsCalleeSaveFpAlwaysSpills = 0;
static constexpr uint32_t kMipsCalleeSaveFpRefSpills = 0;
@@ -43,17 +49,28 @@
(1 << art::mips::F20) | (1 << art::mips::F21) | (1 << art::mips::F22) | (1 << art::mips::F23) |
(1 << art::mips::F24) | (1 << art::mips::F25) | (1 << art::mips::F26) | (1 << art::mips::F27) |
(1 << art::mips::F28) | (1 << art::mips::F29) | (1 << art::mips::F30) | (1 << art::mips::F31);
+static constexpr uint32_t kMipsCalleeSaveFpEverythingSpills =
+ (1 << art::mips::F0) | (1 << art::mips::F1) | (1 << art::mips::F2) | (1 << art::mips::F3) |
+ (1 << art::mips::F4) | (1 << art::mips::F5) | (1 << art::mips::F6) | (1 << art::mips::F7) |
+ (1 << art::mips::F8) | (1 << art::mips::F9) | (1 << art::mips::F10) | (1 << art::mips::F11) |
+ (1 << art::mips::F12) | (1 << art::mips::F13) | (1 << art::mips::F14) | (1 << art::mips::F15) |
+ (1 << art::mips::F16) | (1 << art::mips::F17) | (1 << art::mips::F18) | (1 << art::mips::F19) |
+ (1 << art::mips::F20) | (1 << art::mips::F21) | (1 << art::mips::F22) | (1 << art::mips::F23) |
+ (1 << art::mips::F24) | (1 << art::mips::F25) | (1 << art::mips::F26) | (1 << art::mips::F27) |
+ (1 << art::mips::F28) | (1 << art::mips::F29) | (1 << art::mips::F30) | (1 << art::mips::F31);
constexpr uint32_t MipsCalleeSaveCoreSpills(Runtime::CalleeSaveType type) {
return kMipsCalleeSaveAlwaysSpills | kMipsCalleeSaveRefSpills |
(type == Runtime::kRefsAndArgs ? kMipsCalleeSaveArgSpills : 0) |
- (type == Runtime::kSaveAll ? kMipsCalleeSaveAllSpills : 0);
+ (type == Runtime::kSaveAll ? kMipsCalleeSaveAllSpills : 0) |
+ (type == Runtime::kSaveEverything ? kMipsCalleeSaveEverythingSpills : 0);
}
constexpr uint32_t MipsCalleeSaveFPSpills(Runtime::CalleeSaveType type) {
return kMipsCalleeSaveFpAlwaysSpills | kMipsCalleeSaveFpRefSpills |
(type == Runtime::kRefsAndArgs ? kMipsCalleeSaveFpArgSpills : 0) |
- (type == Runtime::kSaveAll ? kMipsCalleeSaveAllFPSpills : 0);
+ (type == Runtime::kSaveAll ? kMipsCalleeSaveAllFPSpills : 0) |
+ (type == Runtime::kSaveEverything ? kMipsCalleeSaveFpEverythingSpills : 0);
}
constexpr uint32_t MipsCalleeSaveFrameSize(Runtime::CalleeSaveType type) {
diff --git a/runtime/arch/mips/thread_mips.cc b/runtime/arch/mips/thread_mips.cc
index 06d6211..0a9ab7a 100644
--- a/runtime/arch/mips/thread_mips.cc
+++ b/runtime/arch/mips/thread_mips.cc
@@ -25,7 +25,7 @@
void Thread::InitCpu() {
CHECK_EQ(THREAD_FLAGS_OFFSET, ThreadFlagsOffset<PointerSize::k32>().Int32Value());
CHECK_EQ(THREAD_CARD_TABLE_OFFSET, CardTableOffset<PointerSize::k32>().Int32Value());
- CHECK_EQ(THREAD_EXCEPTION_OFFSET, ExceptionOffset<PointerSize::k64>().Int32Value());
+ CHECK_EQ(THREAD_EXCEPTION_OFFSET, ExceptionOffset<PointerSize::k32>().Int32Value());
}
void Thread::CleanupCpu() {
diff --git a/runtime/arch/mips64/asm_support_mips64.h b/runtime/arch/mips64/asm_support_mips64.h
index 995fcf3..2c16c25 100644
--- a/runtime/arch/mips64/asm_support_mips64.h
+++ b/runtime/arch/mips64/asm_support_mips64.h
@@ -25,5 +25,7 @@
#define FRAME_SIZE_REFS_ONLY_CALLEE_SAVE 80
// $f12-$f19, $a1-$a7, $s2-$s7 + $gp + $s8 + $ra, 16 total + 1x8 bytes padding + method*
#define FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE 208
+// $f0-$f31, $at, $v0-$v1, $a0-$a7, $t0-$t3, $s0-$s7, $t8-$t9, $gp, $s8, $ra + padding + method*
+#define FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE 496
#endif // ART_RUNTIME_ARCH_MIPS64_ASM_SUPPORT_MIPS64_H_
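The 496 matches the comment's inventory at 8 bytes per slot: 28 GPRs, 32 FPRs,
one doubleword of padding and the method pointer. As a sketch:

    // Sketch: the MIPS64 kSaveEverything frame adds up to 496 bytes.
    static_assert(28 * 8 + 32 * 8 + 1 * 8 + 1 * 8 == 496,
                  "28 GPRs + 32 FPRs + padding + method*");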
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index ae69620..3469de2 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -314,6 +314,228 @@
.endm
/*
+ * Macro that sets up the callee save frame to conform with
+ * Runtime::CreateCalleeSaveMethod(kSaveEverything).
+ * callee-save: $at + $v0-$v1 + $a0-$a7 + $t0-$t3 + $s0-$s7 + $t8-$t9 + $gp + $s8 + $ra,
+ * $f0-$f31; 28(GPR) + 32(FPR) + 1x8 bytes padding + method*
+ * This macro sets up $gp; entrypoints using it should start with ENTRY_NO_GP.
+ */
+.macro SETUP_SAVE_EVERYTHING_CALLEE_SAVE_FRAME
+ daddiu $sp, $sp, -496
+ .cfi_adjust_cfa_offset 496
+
+ // Ugly compile-time check, but we only have the preprocessor.
+#if (FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE != 496)
+#error "SAVE_EVERYTHING_CALLEE_SAVE_FRAME(MIPS64) size not as expected."
+#endif
+
+ // Save core registers.
+ sd $ra, 488($sp)
+ .cfi_rel_offset 31, 488
+ sd $s8, 480($sp)
+ .cfi_rel_offset 30, 480
+ sd $t9, 464($sp)
+ .cfi_rel_offset 25, 464
+ sd $t8, 456($sp)
+ .cfi_rel_offset 24, 456
+ sd $s7, 448($sp)
+ .cfi_rel_offset 23, 448
+ sd $s6, 440($sp)
+ .cfi_rel_offset 22, 440
+ sd $s5, 432($sp)
+ .cfi_rel_offset 21, 432
+ sd $s4, 424($sp)
+ .cfi_rel_offset 20, 424
+ sd $s3, 416($sp)
+ .cfi_rel_offset 19, 416
+ sd $s2, 408($sp)
+ .cfi_rel_offset 18, 408
+ sd $s1, 400($sp)
+ .cfi_rel_offset 17, 400
+ sd $s0, 392($sp)
+ .cfi_rel_offset 16, 392
+ sd $t3, 384($sp)
+ .cfi_rel_offset 15, 384
+ sd $t2, 376($sp)
+ .cfi_rel_offset 14, 376
+ sd $t1, 368($sp)
+ .cfi_rel_offset 13, 368
+ sd $t0, 360($sp)
+ .cfi_rel_offset 12, 360
+ sd $a7, 352($sp)
+ .cfi_rel_offset 11, 352
+ sd $a6, 344($sp)
+ .cfi_rel_offset 10, 344
+ sd $a5, 336($sp)
+ .cfi_rel_offset 9, 336
+ sd $a4, 328($sp)
+ .cfi_rel_offset 8, 328
+ sd $a3, 320($sp)
+ .cfi_rel_offset 7, 320
+ sd $a2, 312($sp)
+ .cfi_rel_offset 6, 312
+ sd $a1, 304($sp)
+ .cfi_rel_offset 5, 304
+ sd $a0, 296($sp)
+ .cfi_rel_offset 4, 296
+ sd $v1, 288($sp)
+ .cfi_rel_offset 3, 288
+ sd $v0, 280($sp)
+ .cfi_rel_offset 2, 280
+
+ // Set up $gp, clobbering $ra and using the branch delay slot for a useful instruction.
+ bal 1f
+ .set push
+ .set noat
+ sd $at, 272($sp)
+ .cfi_rel_offset 1, 272
+ .set pop
+1:
+ .cpsetup $ra, 472, 1b
+
+ // Save FP registers.
+ s.d $f31, 264($sp)
+ s.d $f30, 256($sp)
+ s.d $f29, 248($sp)
+ s.d $f28, 240($sp)
+ s.d $f27, 232($sp)
+ s.d $f26, 224($sp)
+ s.d $f25, 216($sp)
+ s.d $f24, 208($sp)
+ s.d $f23, 200($sp)
+ s.d $f22, 192($sp)
+ s.d $f21, 184($sp)
+ s.d $f20, 176($sp)
+ s.d $f19, 168($sp)
+ s.d $f18, 160($sp)
+ s.d $f17, 152($sp)
+ s.d $f16, 144($sp)
+ s.d $f15, 136($sp)
+ s.d $f14, 128($sp)
+ s.d $f13, 120($sp)
+ s.d $f12, 112($sp)
+ s.d $f11, 104($sp)
+ s.d $f10, 96($sp)
+ s.d $f9, 88($sp)
+ s.d $f8, 80($sp)
+ s.d $f7, 72($sp)
+ s.d $f6, 64($sp)
+ s.d $f5, 56($sp)
+ s.d $f4, 48($sp)
+ s.d $f3, 40($sp)
+ s.d $f2, 32($sp)
+ s.d $f1, 24($sp)
+ s.d $f0, 16($sp)
+
+ # load appropriate callee-save-method
+ ld $t1, %got(_ZN3art7Runtime9instance_E)($gp)
+ ld $t1, 0($t1)
+ ld $t1, RUNTIME_SAVE_EVERYTHING_CALLEE_SAVE_FRAME_OFFSET($t1)
+ sd $t1, 0($sp) # Place ArtMethod* at bottom of stack.
+ # Place sp in Thread::Current()->top_quick_frame.
+ sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)
+.endm
+
+.macro RESTORE_SAVE_EVERYTHING_CALLEE_SAVE_FRAME
+ // Restore FP registers.
+ l.d $f31, 264($sp)
+ l.d $f30, 256($sp)
+ l.d $f29, 248($sp)
+ l.d $f28, 240($sp)
+ l.d $f27, 232($sp)
+ l.d $f26, 224($sp)
+ l.d $f25, 216($sp)
+ l.d $f24, 208($sp)
+ l.d $f23, 200($sp)
+ l.d $f22, 192($sp)
+ l.d $f21, 184($sp)
+ l.d $f20, 176($sp)
+ l.d $f19, 168($sp)
+ l.d $f18, 160($sp)
+ l.d $f17, 152($sp)
+ l.d $f16, 144($sp)
+ l.d $f15, 136($sp)
+ l.d $f14, 128($sp)
+ l.d $f13, 120($sp)
+ l.d $f12, 112($sp)
+ l.d $f11, 104($sp)
+ l.d $f10, 96($sp)
+ l.d $f9, 88($sp)
+ l.d $f8, 80($sp)
+ l.d $f7, 72($sp)
+ l.d $f6, 64($sp)
+ l.d $f5, 56($sp)
+ l.d $f4, 48($sp)
+ l.d $f3, 40($sp)
+ l.d $f2, 32($sp)
+ l.d $f1, 24($sp)
+ l.d $f0, 16($sp)
+
+ // Restore core registers.
+ .cpreturn
+ ld $ra, 488($sp)
+ .cfi_restore 31
+ ld $s8, 480($sp)
+ .cfi_restore 30
+ ld $t9, 464($sp)
+ .cfi_restore 25
+ ld $t8, 456($sp)
+ .cfi_restore 24
+ ld $s7, 448($sp)
+ .cfi_restore 23
+ ld $s6, 440($sp)
+ .cfi_restore 22
+ ld $s5, 432($sp)
+ .cfi_restore 21
+ ld $s4, 424($sp)
+ .cfi_restore 20
+ ld $s3, 416($sp)
+ .cfi_restore 19
+ ld $s2, 408($sp)
+ .cfi_restore 18
+ ld $s1, 400($sp)
+ .cfi_restore 17
+ ld $s0, 392($sp)
+ .cfi_restore 16
+ ld $t3, 384($sp)
+ .cfi_restore 15
+ ld $t2, 376($sp)
+ .cfi_restore 14
+ ld $t1, 368($sp)
+ .cfi_restore 13
+ ld $t0, 360($sp)
+ .cfi_restore 12
+ ld $a7, 352($sp)
+ .cfi_restore 11
+ ld $a6, 344($sp)
+ .cfi_restore 10
+ ld $a5, 336($sp)
+ .cfi_restore 9
+ ld $a4, 328($sp)
+ .cfi_restore 8
+ ld $a3, 320($sp)
+ .cfi_restore 7
+ ld $a2, 312($sp)
+ .cfi_restore 6
+ ld $a1, 304($sp)
+ .cfi_restore 5
+ ld $a0, 296($sp)
+ .cfi_restore 4
+ ld $v1, 288($sp)
+ .cfi_restore 3
+ ld $v0, 280($sp)
+ .cfi_restore 2
+ .set push
+ .set noat
+ ld $at, 272($sp)
+ .cfi_restore 1
+ .set pop
+
+ daddiu $sp, $sp, 496
+ .cfi_adjust_cfa_offset -496
+.endm
+
+ /*
* Macro that set calls through to artDeliverPendingExceptionFromCode,
* where the pending
* exception is Thread::Current()->exception_
@@ -1673,17 +1895,19 @@
* Called by managed code when the value in rSUSPEND has been decremented to 0.
*/
.extern artTestSuspendFromCode
-ENTRY art_quick_test_suspend
- lh $a0, THREAD_FLAGS_OFFSET(rSELF)
- bne $a0, $zero, 1f
+ENTRY_NO_GP art_quick_test_suspend
+ lh rSUSPEND, THREAD_FLAGS_OFFSET(rSELF)
+ bne rSUSPEND, $zero, 1f
daddiu rSUSPEND, $zero, SUSPEND_CHECK_INTERVAL # reset rSUSPEND to SUSPEND_CHECK_INTERVAL
jalr $zero, $ra
- .cpreturn # Restore gp from t8 in branch delay slot.
+ nop
1:
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves for stack crawl
+ SETUP_SAVE_EVERYTHING_CALLEE_SAVE_FRAME # save everything for stack crawl
jal artTestSuspendFromCode # (Thread*)
move $a0, rSELF
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
+ RESTORE_SAVE_EVERYTHING_CALLEE_SAVE_FRAME
+ jalr $zero, $ra
+ nop
END art_quick_test_suspend
/*
diff --git a/runtime/arch/mips64/quick_method_frame_info_mips64.h b/runtime/arch/mips64/quick_method_frame_info_mips64.h
index b7dc57f..d52945f 100644
--- a/runtime/arch/mips64/quick_method_frame_info_mips64.h
+++ b/runtime/arch/mips64/quick_method_frame_info_mips64.h
@@ -25,6 +25,8 @@
namespace art {
namespace mips64 {
+static constexpr uint32_t kMips64CalleeSaveAlwaysSpills =
+ (1 << art::mips64::RA);
static constexpr uint32_t kMips64CalleeSaveRefSpills =
(1 << art::mips64::S2) | (1 << art::mips64::S3) | (1 << art::mips64::S4) |
(1 << art::mips64::S5) | (1 << art::mips64::S6) | (1 << art::mips64::S7) |
@@ -35,6 +37,14 @@
(1 << art::mips64::A7);
static constexpr uint32_t kMips64CalleeSaveAllSpills =
(1 << art::mips64::S0) | (1 << art::mips64::S1);
+static constexpr uint32_t kMips64CalleeSaveEverythingSpills =
+ (1 << art::mips64::AT) | (1 << art::mips64::V0) | (1 << art::mips64::V1) |
+ (1 << art::mips64::A0) | (1 << art::mips64::A1) | (1 << art::mips64::A2) |
+ (1 << art::mips64::A3) | (1 << art::mips64::A4) | (1 << art::mips64::A5) |
+ (1 << art::mips64::A6) | (1 << art::mips64::A7) | (1 << art::mips64::T0) |
+ (1 << art::mips64::T1) | (1 << art::mips64::T2) | (1 << art::mips64::T3) |
+ (1 << art::mips64::S0) | (1 << art::mips64::S1) | (1 << art::mips64::T8) |
+ (1 << art::mips64::T9);
static constexpr uint32_t kMips64CalleeSaveFpRefSpills = 0;
static constexpr uint32_t kMips64CalleeSaveFpArgSpills =
@@ -46,17 +56,31 @@
(1 << art::mips64::F24) | (1 << art::mips64::F25) | (1 << art::mips64::F26) |
(1 << art::mips64::F27) | (1 << art::mips64::F28) | (1 << art::mips64::F29) |
(1 << art::mips64::F30) | (1 << art::mips64::F31);
+static constexpr uint32_t kMips64CalleeSaveFpEverythingSpills =
+ (1 << art::mips64::F0) | (1 << art::mips64::F1) | (1 << art::mips64::F2) |
+ (1 << art::mips64::F3) | (1 << art::mips64::F4) | (1 << art::mips64::F5) |
+ (1 << art::mips64::F6) | (1 << art::mips64::F7) | (1 << art::mips64::F8) |
+ (1 << art::mips64::F9) | (1 << art::mips64::F10) | (1 << art::mips64::F11) |
+ (1 << art::mips64::F12) | (1 << art::mips64::F13) | (1 << art::mips64::F14) |
+ (1 << art::mips64::F15) | (1 << art::mips64::F16) | (1 << art::mips64::F17) |
+ (1 << art::mips64::F18) | (1 << art::mips64::F19) | (1 << art::mips64::F20) |
+ (1 << art::mips64::F21) | (1 << art::mips64::F22) | (1 << art::mips64::F23) |
+ (1 << art::mips64::F24) | (1 << art::mips64::F25) | (1 << art::mips64::F26) |
+ (1 << art::mips64::F27) | (1 << art::mips64::F28) | (1 << art::mips64::F29) |
+ (1 << art::mips64::F30) | (1 << art::mips64::F31);
constexpr uint32_t Mips64CalleeSaveCoreSpills(Runtime::CalleeSaveType type) {
- return kMips64CalleeSaveRefSpills |
+ return kMips64CalleeSaveAlwaysSpills | kMips64CalleeSaveRefSpills |
(type == Runtime::kRefsAndArgs ? kMips64CalleeSaveArgSpills : 0) |
- (type == Runtime::kSaveAll ? kMips64CalleeSaveAllSpills : 0) | (1 << art::mips64::RA);
+ (type == Runtime::kSaveAll ? kMips64CalleeSaveAllSpills : 0) |
+ (type == Runtime::kSaveEverything ? kMips64CalleeSaveEverythingSpills : 0);
}
constexpr uint32_t Mips64CalleeSaveFpSpills(Runtime::CalleeSaveType type) {
return kMips64CalleeSaveFpRefSpills |
(type == Runtime::kRefsAndArgs ? kMips64CalleeSaveFpArgSpills: 0) |
- (type == Runtime::kSaveAll ? kMips64CalleeSaveFpAllSpills : 0);
+ (type == Runtime::kSaveAll ? kMips64CalleeSaveFpAllSpills : 0) |
+ (type == Runtime::kSaveEverything ? kMips64CalleeSaveFpEverythingSpills : 0);
}
constexpr uint32_t Mips64CalleeSaveFrameSize(Runtime::CalleeSaveType type) {
diff --git a/runtime/arch/quick_alloc_entrypoints.S b/runtime/arch/quick_alloc_entrypoints.S
index 290769b..fa86bf4 100644
--- a/runtime/arch/quick_alloc_entrypoints.S
+++ b/runtime/arch/quick_alloc_entrypoints.S
@@ -87,6 +87,27 @@
ONE_ARG_DOWNCALL art_quick_alloc_string_from_string ## c_suffix, artAllocStringFromStringFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
.macro GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
+GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_REGION_TLAB_ALLOCATORS
+GENERATE_ALLOC_ENTRYPOINTS_FOR_REGION_TLAB_ALLOCATOR
+.endm
+
+.macro GENERATE_ALLOC_ENTRYPOINTS_FOR_REGION_TLAB_ALLOCATOR
+// This is to be separately defined for each architecture to allow a hand-written assembly fast path.
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)
+.endm
+
+.macro GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_REGION_TLAB_ALLOCATORS
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_dlmalloc, DlMalloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_dlmalloc, DlMalloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_dlmalloc, DlMalloc)
@@ -219,20 +240,6 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_instrumented, RegionInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_instrumented, RegionInstrumented)
-// This is to be separately defined for each architecture to allow a hand-written assembly fast path.
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)
-
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab_instrumented, RegionTLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab_instrumented, RegionTLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab_instrumented, RegionTLABInstrumented)
diff --git a/runtime/arch/x86/asm_support_x86.h b/runtime/arch/x86/asm_support_x86.h
index b0a6017..ba5fd99 100644
--- a/runtime/arch/x86/asm_support_x86.h
+++ b/runtime/arch/x86/asm_support_x86.h
@@ -21,8 +21,7 @@
#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVE 32
#define FRAME_SIZE_REFS_ONLY_CALLEE_SAVE 32
-
-// 32 bytes for GPRs and 32 bytes for FPRs.
#define FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE (32 + 32)
+#define FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE (48 + 64)
#endif // ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_H_
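The (48 + 64) grouping reads as 48 bytes of core state (7 pushed GPRs, 12 bytes
of alignment padding, the method slot and the implicit return address, 4 bytes
each) plus 64 bytes for xmm0-xmm7, matching the compile-time check added to
quick_entrypoints_x86.S. As a sketch:

    // Sketch: the x86 kSaveEverything frame adds up to 48 + 64 bytes.
    static_assert(7 * 4 + 12 + 4 + 4 == 48, "GPRs + padding + method* + return address");
    static_assert(8 * 8 == 64, "xmm0-xmm7 at 8 bytes each");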
diff --git a/runtime/arch/x86/fault_handler_x86.cc b/runtime/arch/x86/fault_handler_x86.cc
index 533905e..3efeb40 100644
--- a/runtime/arch/x86/fault_handler_x86.cc
+++ b/runtime/arch/x86/fault_handler_x86.cc
@@ -71,18 +71,9 @@
namespace art {
-#if defined(__APPLE__) && defined(__x86_64__)
-// mac symbols have a prefix of _ on x86_64
-extern "C" void _art_quick_throw_null_pointer_exception_from_signal();
-extern "C" void _art_quick_throw_stack_overflow();
-extern "C" void _art_quick_test_suspend();
-#define EXT_SYM(sym) _ ## sym
-#else
extern "C" void art_quick_throw_null_pointer_exception_from_signal();
extern "C" void art_quick_throw_stack_overflow();
extern "C" void art_quick_test_suspend();
-#define EXT_SYM(sym) sym
-#endif
// Note this is different from the others (no underscore on 64 bit mac) due to
// the way the symbol is defined in the .S file.
@@ -320,7 +311,7 @@
uc->CTX_ESP = reinterpret_cast<uintptr_t>(next_sp);
uc->CTX_EIP = reinterpret_cast<uintptr_t>(
- EXT_SYM(art_quick_throw_null_pointer_exception_from_signal));
+ art_quick_throw_null_pointer_exception_from_signal);
// Pass the faulting address as the first argument of
// art_quick_throw_null_pointer_exception_from_signal.
#if defined(__x86_64__)
@@ -397,7 +388,7 @@
*next_sp = retaddr;
uc->CTX_ESP = reinterpret_cast<uintptr_t>(next_sp);
- uc->CTX_EIP = reinterpret_cast<uintptr_t>(EXT_SYM(art_quick_test_suspend));
+ uc->CTX_EIP = reinterpret_cast<uintptr_t>(art_quick_test_suspend);
// Now remove the suspend trigger that caused this fault.
Thread::Current()->RemoveSuspendTrigger();
@@ -443,7 +434,7 @@
// the previous frame.
// Now arrange for the signal handler to return to art_quick_throw_stack_overflow.
- uc->CTX_EIP = reinterpret_cast<uintptr_t>(EXT_SYM(art_quick_throw_stack_overflow));
+ uc->CTX_EIP = reinterpret_cast<uintptr_t>(art_quick_throw_stack_overflow);
return true;
}
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 77e04e7..68ba0cf 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -222,6 +222,74 @@
END_MACRO
/*
+ * Macro that sets up the callee save frame to conform with
+ * Runtime::CreateCalleeSaveMethod(kSaveEverything)
+ */
+MACRO2(SETUP_SAVE_EVERYTHING_CALLEE_SAVE_FRAME, got_reg, temp_reg)
+ // Save core registers.
+ PUSH edi
+ PUSH esi
+ PUSH ebp
+ PUSH ebx
+ PUSH edx
+ PUSH ecx
+ PUSH eax
+ // Create space for FPRs and stack alignment padding.
+ subl MACRO_LITERAL(12 + 8 * 8), %esp
+ CFI_ADJUST_CFA_OFFSET(12 + 8 * 8)
+ // Save FPRs.
+ movsd %xmm0, 12(%esp)
+ movsd %xmm1, 20(%esp)
+ movsd %xmm2, 28(%esp)
+ movsd %xmm3, 36(%esp)
+ movsd %xmm4, 44(%esp)
+ movsd %xmm5, 52(%esp)
+ movsd %xmm6, 60(%esp)
+ movsd %xmm7, 68(%esp)
+
+ SETUP_GOT_NOSAVE RAW_VAR(got_reg)
+ // Load Runtime::instance_ from GOT.
+ movl SYMBOL(_ZN3art7Runtime9instance_E)@GOT(REG_VAR(got_reg)), REG_VAR(temp_reg)
+ movl (REG_VAR(temp_reg)), REG_VAR(temp_reg)
+ // Push save everything callee-save method.
+ pushl RUNTIME_SAVE_EVERYTHING_CALLEE_SAVE_FRAME_OFFSET(REG_VAR(temp_reg))
+ CFI_ADJUST_CFA_OFFSET(4)
+ // Store esp as the top quick frame.
+ movl %esp, %fs:THREAD_TOP_QUICK_FRAME_OFFSET
+
+ // Ugly compile-time check, but we only have the preprocessor.
+ // Last +4: implicit return address pushed on stack when caller made call.
+#if (FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE != 7*4 + 8*8 + 12 + 4 + 4)
+#error "SAVE_EVERYTHING_CALLEE_SAVE_FRAME(X86) size not as expected."
+#endif
+END_MACRO
+
+MACRO0(RESTORE_SAVE_EVERYTHING_CALLEE_SAVE_FRAME)
+ // Restore FPRs. Method and padding are still on the stack.
+ movsd 16(%esp), %xmm0
+ movsd 24(%esp), %xmm1
+ movsd 32(%esp), %xmm2
+ movsd 40(%esp), %xmm3
+ movsd 48(%esp), %xmm4
+ movsd 56(%esp), %xmm5
+ movsd 64(%esp), %xmm6
+ movsd 72(%esp), %xmm7
+
+ // Remove save everything callee save method, stack alignment padding and FPRs.
+ addl MACRO_LITERAL(16 + 8 * 8), %esp
+ CFI_ADJUST_CFA_OFFSET(-(16 + 8 * 8))
+
+ // Restore core registers.
+ POP eax
+ POP ecx
+ POP edx
+ POP ebx
+ POP ebp
+ POP esi
+ POP edi
+END_MACRO
+
+ /*
* Macro that set calls through to artDeliverPendingExceptionFromCode, where the pending
* exception is Thread::Current()->exception_.
*/
@@ -661,22 +729,6 @@
ret
END_FUNCTION art_quick_invoke_static_stub
-MACRO3(NO_ARG_DOWNCALL, c_name, cxx_name, return_macro)
- DEFINE_FUNCTION VAR(c_name)
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
- // Outgoing argument set up
- subl MACRO_LITERAL(12), %esp // push padding
- CFI_ADJUST_CFA_OFFSET(12)
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- CFI_ADJUST_CFA_OFFSET(4)
- call CALLVAR(cxx_name) // cxx_name(Thread*)
- addl MACRO_LITERAL(16), %esp // pop arguments
- CFI_ADJUST_CFA_OFFSET(-16)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
- CALL_MACRO(return_macro) // return or deliver exception
- END_FUNCTION VAR(c_name)
-END_MACRO
-
MACRO3(ONE_ARG_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name)
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
@@ -1028,7 +1080,13 @@
movl 0(%edx, %eax, COMPRESSED_REFERENCE_SIZE), %edx
// Read barrier for class load.
cmpl LITERAL(0), %fs:THREAD_IS_GC_MARKING_OFFSET
- jne .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path
+ jz .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit
+ // Null check so that we can load the lock word.
+ testl %edx, %edx
+ jz .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit
+ // Check the mark bit; if it is 1, return.
+ testl LITERAL(LOCK_WORD_MARK_BIT_MASK_SHIFTED), MIRROR_OBJECT_LOCK_WORD_OFFSET(%edx)
+ jz .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path
.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit:
ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_region_tlab_slow_path
.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path:
@@ -1065,7 +1123,7 @@
test LITERAL(LOCK_WORD_STATE_MASK), %ecx // test the 2 high bits.
jne .Lslow_lock // slow path if either of the two high bits are set.
movl %ecx, %edx // save lock word (edx) to keep read barrier bits.
- andl LITERAL(LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED), %ecx // zero the read barrier bits.
+ andl LITERAL(LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED), %ecx // zero the gc bits.
test %ecx, %ecx
jnz .Lalready_thin // lock word contains a thin lock
// unlocked case - edx: original lock word, eax: obj.
@@ -1081,9 +1139,9 @@
cmpw %cx, %dx // do we hold the lock already?
jne .Lslow_lock
movl %edx, %ecx // copy the lock word to check count overflow.
- andl LITERAL(LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED), %ecx // zero the read barrier bits.
+ andl LITERAL(LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED), %ecx // zero the gc bits.
addl LITERAL(LOCK_WORD_THIN_LOCK_COUNT_ONE), %ecx // increment recursion count for overflow check.
- test LITERAL(LOCK_WORD_READ_BARRIER_STATE_MASK), %ecx // overflowed if either of the upper two bits (28-29) are set.
+ test LITERAL(LOCK_WORD_GC_STATE_MASK_SHIFTED), %ecx // overflowed if the first gc state bit is set.
jne .Lslow_lock // count overflowed so go slow
movl %eax, %ecx // save obj to use eax for cmpxchg.
movl %edx, %eax // copy the lock word as the old val for cmpxchg.
@@ -1137,13 +1195,13 @@
cmpw %cx, %dx // does the thread id match?
jne .Lslow_unlock
movl %ecx, %edx // copy the lock word to detect new count of 0.
- andl LITERAL(LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED), %edx // zero the read barrier bits.
+ andl LITERAL(LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED), %edx // zero the gc bits.
cmpl LITERAL(LOCK_WORD_THIN_LOCK_COUNT_ONE), %edx
jae .Lrecursive_thin_unlock
// update lockword, cmpxchg necessary for read barrier bits.
movl %eax, %edx // edx: obj
movl %ecx, %eax // eax: old lock word.
- andl LITERAL(LOCK_WORD_READ_BARRIER_STATE_MASK), %ecx // ecx: new lock word zero except original rb bits.
+ andl LITERAL(LOCK_WORD_GC_STATE_MASK_SHIFTED), %ecx // ecx: new lock word zero except original gc bits.
#ifndef USE_READ_BARRIER
movl %ecx, MIRROR_OBJECT_LOCK_WORD_OFFSET(%edx)
#else
@@ -1397,7 +1455,19 @@
ret
END_FUNCTION art_quick_memcpy
-NO_ARG_DOWNCALL art_quick_test_suspend, artTestSuspendFromCode, ret
+DEFINE_FUNCTION art_quick_test_suspend
+ SETUP_SAVE_EVERYTHING_CALLEE_SAVE_FRAME ebx, ebx // save everything for GC
+ // Outgoing argument set up
+ subl MACRO_LITERAL(12), %esp // push padding
+ CFI_ADJUST_CFA_OFFSET(12)
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ CFI_ADJUST_CFA_OFFSET(4)
+ call SYMBOL(artTestSuspendFromCode) // (Thread*)
+ addl MACRO_LITERAL(16), %esp // pop arguments
+ CFI_ADJUST_CFA_OFFSET(-16)
+ RESTORE_SAVE_EVERYTHING_CALLEE_SAVE_FRAME // restore frame up to return address
+ ret // return
+END_FUNCTION art_quick_test_suspend
DEFINE_FUNCTION art_quick_d2l
subl LITERAL(12), %esp // alignment padding, room for argument
@@ -1923,6 +1993,14 @@
// convention (e.g. standard callee-save registers are preserved).
MACRO2(READ_BARRIER_MARK_REG, name, reg)
DEFINE_FUNCTION VAR(name)
+ // Null check so that we can load the lock word.
+ test REG_VAR(reg), REG_VAR(reg)
+ jz .Lret_rb_\name
+ // Check the mark bit; if it is 1, return.
+ testl LITERAL(LOCK_WORD_MARK_BIT_MASK_SHIFTED), MIRROR_OBJECT_LOCK_WORD_OFFSET(REG_VAR(reg))
+ jz .Lslow_rb_\name
+ ret
+.Lslow_rb_\name:
// Save all potentially live caller-save core registers.
PUSH eax
PUSH ecx
@@ -1970,6 +2048,7 @@
POP_REG_NE edx, RAW_VAR(reg)
POP_REG_NE ecx, RAW_VAR(reg)
POP_REG_NE eax, RAW_VAR(reg)
+.Lret_rb_\name:
ret
END_FUNCTION VAR(name)
END_MACRO
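The new prologue gives every read-barrier mark stub a register-only fast path:
null references and already-marked objects return immediately, and only unmarked
objects pay for spilling the caller-save registers and calling into the runtime.
A C++-flavored sketch; Object, SlowPathMark and the bit position are illustrative
stand-ins:

    #include <cstdint>

    struct Object { uint32_t lock_word; };
    constexpr uint32_t kMarkBitMaskShifted = 1u << 29;  // models LOCK_WORD_MARK_BIT_MASK_SHIFTED

    Object* SlowPathMark(Object* ref);                  // runtime call (declared only)

    Object* ReadBarrierMark(Object* ref) {
      if (ref == nullptr) {
        return nullptr;                                 // null: nothing to mark
      }
      if ((ref->lock_word & kMarkBitMaskShifted) != 0) {
        return ref;                                     // mark bit set: already marked
      }
      return SlowPathMark(ref);                         // spill registers, call the runtime
    }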
diff --git a/runtime/arch/x86/quick_method_frame_info_x86.h b/runtime/arch/x86/quick_method_frame_info_x86.h
index 24c671c..a1612c3 100644
--- a/runtime/arch/x86/quick_method_frame_info_x86.h
+++ b/runtime/arch/x86/quick_method_frame_info_x86.h
@@ -36,21 +36,33 @@
XMM7 = 7,
};
+static constexpr uint32_t kX86CalleeSaveAlwaysSpills =
+ (1 << art::x86::kNumberOfCpuRegisters); // Fake return address callee save.
static constexpr uint32_t kX86CalleeSaveRefSpills =
(1 << art::x86::EBP) | (1 << art::x86::ESI) | (1 << art::x86::EDI);
static constexpr uint32_t kX86CalleeSaveArgSpills =
(1 << art::x86::ECX) | (1 << art::x86::EDX) | (1 << art::x86::EBX);
+static constexpr uint32_t kX86CalleeSaveEverythingSpills =
+ (1 << art::x86::EAX) | (1 << art::x86::ECX) | (1 << art::x86::EDX) | (1 << art::x86::EBX);
+
static constexpr uint32_t kX86CalleeSaveFpArgSpills =
(1 << art::x86::XMM0) | (1 << art::x86::XMM1) |
(1 << art::x86::XMM2) | (1 << art::x86::XMM3);
+static constexpr uint32_t kX86CalleeSaveFpEverythingSpills =
+ (1 << art::x86::XMM0) | (1 << art::x86::XMM1) |
+ (1 << art::x86::XMM2) | (1 << art::x86::XMM3) |
+ (1 << art::x86::XMM4) | (1 << art::x86::XMM5) |
+ (1 << art::x86::XMM6) | (1 << art::x86::XMM7);
constexpr uint32_t X86CalleeSaveCoreSpills(Runtime::CalleeSaveType type) {
- return kX86CalleeSaveRefSpills | (type == Runtime::kRefsAndArgs ? kX86CalleeSaveArgSpills : 0) |
- (1 << art::x86::kNumberOfCpuRegisters); // fake return address callee save
+ return kX86CalleeSaveAlwaysSpills | kX86CalleeSaveRefSpills |
+ (type == Runtime::kRefsAndArgs ? kX86CalleeSaveArgSpills : 0) |
+ (type == Runtime::kSaveEverything ? kX86CalleeSaveEverythingSpills : 0);
}
constexpr uint32_t X86CalleeSaveFpSpills(Runtime::CalleeSaveType type) {
- return type == Runtime::kRefsAndArgs ? kX86CalleeSaveFpArgSpills : 0;
+ return (type == Runtime::kRefsAndArgs ? kX86CalleeSaveFpArgSpills : 0) |
+ (type == Runtime::kSaveEverything ? kX86CalleeSaveFpEverythingSpills : 0);
}
constexpr uint32_t X86CalleeSaveFrameSize(Runtime::CalleeSaveType type) {
diff --git a/runtime/arch/x86_64/asm_support_x86_64.S b/runtime/arch/x86_64/asm_support_x86_64.S
index c4e723c..0728f99 100644
--- a/runtime/arch/x86_64/asm_support_x86_64.S
+++ b/runtime/arch/x86_64/asm_support_x86_64.S
@@ -31,7 +31,8 @@
// Clang/llvm does not support .altmacro. However, the clang/llvm preprocessor doesn't
// separate the backslash and parameter by a space. Everything just works.
#define RAW_VAR(name) \name
- #define VAR(name) SYMBOL(\name)
+ #define VAR(name) \name
+ #define CALLVAR(name) SYMBOL(\name)
#define PLT_VAR(name) \name@PLT
#define REG_VAR(name) %\name
#define CALL_MACRO(name) \name
@@ -45,6 +46,7 @@
.altmacro
#define RAW_VAR(name) name&
#define VAR(name) name&
+ #define CALLVAR(name) SYMBOL(name&)
#define PLT_VAR(name) name&@PLT
#define REG_VAR(name) %name
#define CALL_MACRO(name) name&
@@ -110,10 +112,10 @@
// for mac builds.
MACRO1(DEFINE_FUNCTION, c_name)
FUNCTION_TYPE(SYMBOL(\c_name))
- ASM_HIDDEN SYMBOL(\c_name)
- .globl VAR(c_name)
+ ASM_HIDDEN CALLVAR(c_name)
+ .globl CALLVAR(c_name)
ALIGN_FUNCTION_ENTRY
-VAR(c_name):
+CALLVAR(c_name):
CFI_STARTPROC
// Ensure we get a sane starting CFA.
CFI_DEF_CFA(rsp, 8)
diff --git a/runtime/arch/x86_64/asm_support_x86_64.h b/runtime/arch/x86_64/asm_support_x86_64.h
index 48bec73..58dc2fe 100644
--- a/runtime/arch/x86_64/asm_support_x86_64.h
+++ b/runtime/arch/x86_64/asm_support_x86_64.h
@@ -21,6 +21,7 @@
#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVE (64 + 4*8)
#define FRAME_SIZE_REFS_ONLY_CALLEE_SAVE (64 + 4*8)
-#define FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE (176 + 4*8)
+#define FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE (112 + 12*8)
+#define FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE (144 + 16*8)
#endif // ART_RUNTIME_ARCH_X86_64_ASM_SUPPORT_X86_64_H_
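Both spellings of FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE equal 208 bytes; the
regrouping moves the FPR slots into an explicit 12*8 term now that 12 XMM
registers live there. The new kSaveEverything frame is 272 bytes: 15 pushed
GPRs, 16 XMMs, padding plus the method slot, and the implicit return address.
As a sketch:

    // Sketch: the two x86-64 refs-and-args spellings agree, and the new total is 272.
    static_assert(176 + 4 * 8 == 112 + 12 * 8, "refs-and-args frame unchanged at 208");
    static_assert(15 * 8 + 16 * 8 + 16 + 8 == 144 + 16 * 8,
                  "GPRs + XMMs + padding/method* + return address == 272");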
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 784ec39..4741ac0 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -165,8 +165,8 @@
PUSH rdx // Quick arg 2.
PUSH rcx // Quick arg 3.
// Create space for FPR args and create 2 slots for ArtMethod*.
- subq MACRO_LITERAL(80 + 4 * 8), %rsp
- CFI_ADJUST_CFA_OFFSET(80 + 4 * 8)
+ subq MACRO_LITERAL(16 + 12 * 8), %rsp
+ CFI_ADJUST_CFA_OFFSET(16 + 12 * 8)
// R10 := ArtMethod* for ref and args callee save frame method.
movq RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10
// Save FPRs.
@@ -189,7 +189,7 @@
// Ugly compile-time check, but we only have the preprocessor.
// Last +8: implicit return address pushed on stack when caller made call.
-#if (FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE != 11 * 8 + 4 * 8 + 80 + 8)
+#if (FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE != 11 * 8 + 12 * 8 + 16 + 8)
#error "REFS_AND_ARGS_CALLEE_SAVE_FRAME(X86_64) size not as expected."
#endif
#endif // __APPLE__
@@ -260,6 +260,108 @@
POP r15
END_MACRO
+ /*
+ * Macro that sets up the callee save frame to conform with
+ * Runtime::CreateCalleeSaveMethod(kSaveEverything)
+ */
+MACRO0(SETUP_SAVE_EVERYTHING_CALLEE_SAVE_FRAME)
+#if defined(__APPLE__)
+ int3
+ int3
+#else
+ // Save core registers from highest to lowest to agree with core spills bitmap.
+ PUSH r15
+ PUSH r14
+ PUSH r13
+ PUSH r12
+ PUSH r11
+ PUSH r10
+ PUSH r9
+ PUSH r8
+ PUSH rdi
+ PUSH rsi
+ PUSH rbp
+ PUSH rbx
+ PUSH rdx
+ PUSH rcx
+ PUSH rax
+ // Create space for FPRs and stack alignment padding.
+ subq MACRO_LITERAL(8 + 16 * 8), %rsp
+ CFI_ADJUST_CFA_OFFSET(8 + 16 * 8)
+ // R10 := Runtime::Current()
+ movq _ZN3art7Runtime9instance_E@GOTPCREL(%rip), %r10
+ movq (%r10), %r10
+ // Save FPRs.
+ movq %xmm0, 8(%rsp)
+ movq %xmm1, 16(%rsp)
+ movq %xmm2, 24(%rsp)
+ movq %xmm3, 32(%rsp)
+ movq %xmm4, 40(%rsp)
+ movq %xmm5, 48(%rsp)
+ movq %xmm6, 56(%rsp)
+ movq %xmm7, 64(%rsp)
+ movq %xmm8, 72(%rsp)
+ movq %xmm9, 80(%rsp)
+ movq %xmm10, 88(%rsp)
+ movq %xmm11, 96(%rsp)
+ movq %xmm12, 104(%rsp)
+ movq %xmm13, 112(%rsp)
+ movq %xmm14, 120(%rsp)
+ movq %xmm15, 128(%rsp)
+ // Push ArtMethod* for save everything frame method.
+ pushq RUNTIME_SAVE_EVERYTHING_CALLEE_SAVE_FRAME_OFFSET(%r10)
+ CFI_ADJUST_CFA_OFFSET(8)
+ // Store rsp as the top quick frame.
+ movq %rsp, %gs:THREAD_TOP_QUICK_FRAME_OFFSET
+
+ // Ugly compile-time check, but we only have the preprocessor.
+ // Last +8: implicit return address pushed on stack when caller made call.
+#if (FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE != 15 * 8 + 16 * 8 + 16 + 8)
+#error "SAVE_EVERYTHING_CALLEE_SAVE_FRAME(X86_64) size not as expected."
+#endif
+#endif // __APPLE__
+END_MACRO
+
+MACRO0(RESTORE_SAVE_EVERYTHING_CALLEE_SAVE_FRAME)
+ // Restore FPRs. Method and padding are still on the stack.
+ movq 16(%rsp), %xmm0
+ movq 24(%rsp), %xmm1
+ movq 32(%rsp), %xmm2
+ movq 40(%rsp), %xmm3
+ movq 48(%rsp), %xmm4
+ movq 56(%rsp), %xmm5
+ movq 64(%rsp), %xmm6
+ movq 72(%rsp), %xmm7
+ movq 80(%rsp), %xmm8
+ movq 88(%rsp), %xmm9
+ movq 96(%rsp), %xmm10
+ movq 104(%rsp), %xmm11
+ movq 112(%rsp), %xmm12
+ movq 120(%rsp), %xmm13
+ movq 128(%rsp), %xmm14
+ movq 136(%rsp), %xmm15
+
+ // Remove save everything callee save method, stack alignment padding and FPRs.
+ addq MACRO_LITERAL(16 + 16 * 8), %rsp
+ CFI_ADJUST_CFA_OFFSET(-(16 + 16 * 8))
+ // Restore callee and GPR args, mixed together to agree with core spills bitmap.
+ POP rax
+ POP rcx
+ POP rdx
+ POP rbx
+ POP rbp
+ POP rsi
+ POP rdi
+ POP r8
+ POP r9
+ POP r10
+ POP r11
+ POP r12
+ POP r13
+ POP r14
+ POP r15
+END_MACRO
+
/*
* Macro that set calls through to artDeliverPendingExceptionFromCode, where the pending
@@ -278,7 +380,7 @@
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rdi // pass Thread::Current()
- call VAR(cxx_name) // cxx_name(Thread*)
+ call CALLVAR(cxx_name) // cxx_name(Thread*)
UNREACHABLE
END_FUNCTION VAR(c_name)
END_MACRO
@@ -288,7 +390,7 @@
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
- call VAR(cxx_name) // cxx_name(arg1, Thread*)
+ call CALLVAR(cxx_name) // cxx_name(arg1, Thread*)
UNREACHABLE
END_FUNCTION VAR(c_name)
END_MACRO
@@ -298,7 +400,7 @@
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
- call VAR(cxx_name) // cxx_name(Thread*)
+ call CALLVAR(cxx_name) // cxx_name(Thread*)
UNREACHABLE
END_FUNCTION VAR(c_name)
END_MACRO
@@ -371,7 +473,7 @@
movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread
movq %rsp, %rcx // pass SP
- call VAR(cxx_name) // cxx_name(arg1, arg2, Thread*, SP)
+ call CALLVAR(cxx_name) // cxx_name(arg1, arg2, Thread*, SP)
// save the code pointer
movq %rax, %rdi
movq %rdx, %rax
@@ -702,23 +804,12 @@
#endif // __APPLE__
END_FUNCTION art_quick_do_long_jump
-MACRO3(NO_ARG_DOWNCALL, c_name, cxx_name, return_macro)
- DEFINE_FUNCTION VAR(c_name)
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
- // Outgoing argument set up
- movq %gs:THREAD_SELF_OFFSET, %rdi // pass Thread::Current()
- call VAR(cxx_name) // cxx_name(Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
- CALL_MACRO(return_macro) // return or deliver exception
- END_FUNCTION VAR(c_name)
-END_MACRO
-
MACRO3(ONE_ARG_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name)
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
- call VAR(cxx_name) // cxx_name(arg0, Thread*)
+ call CALLVAR(cxx_name) // cxx_name(arg0, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro) // return or deliver exception
END_FUNCTION VAR(c_name)
@@ -729,7 +820,7 @@
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
- call VAR(cxx_name) // cxx_name(arg0, arg1, Thread*)
+ call CALLVAR(cxx_name) // cxx_name(arg0, arg1, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro) // return or deliver exception
END_FUNCTION VAR(c_name)
@@ -740,7 +831,7 @@
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rcx // pass Thread::Current()
- call VAR(cxx_name) // cxx_name(arg0, arg1, arg2, Thread*)
+ call CALLVAR(cxx_name) // cxx_name(arg0, arg1, arg2, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro) // return or deliver exception
END_FUNCTION VAR(c_name)
@@ -751,7 +842,7 @@
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %r8 // pass Thread::Current()
- call VAR(cxx_name) // cxx_name(arg1, arg2, arg3, arg4, Thread*)
+ call CALLVAR(cxx_name) // cxx_name(arg1, arg2, arg3, arg4, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro) // return or deliver exception
END_FUNCTION VAR(c_name)
@@ -763,7 +854,7 @@
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
// arg0 is in rdi
movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
- call VAR(cxx_name) // cxx_name(arg0, referrer, Thread*)
+ call CALLVAR(cxx_name) // cxx_name(arg0, referrer, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro)
END_FUNCTION VAR(c_name)
@@ -775,7 +866,7 @@
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
// arg0 and arg1 are in rdi/rsi
movq %gs:THREAD_SELF_OFFSET, %rcx // pass Thread::Current()
- call VAR(cxx_name) // (arg0, arg1, referrer, Thread*)
+ call CALLVAR(cxx_name) // (arg0, arg1, referrer, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro)
END_FUNCTION VAR(c_name)
@@ -787,7 +878,7 @@
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
// arg0, arg1, and arg2 are in rdi/rsi/rdx
movq %gs:THREAD_SELF_OFFSET, %r8 // pass Thread::Current()
- call VAR(cxx_name) // cxx_name(arg0, arg1, arg2, referrer, Thread*)
+ call CALLVAR(cxx_name) // cxx_name(arg0, arg1, arg2, referrer, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro) // return or deliver exception
END_FUNCTION VAR(c_name)
@@ -950,7 +1041,7 @@
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
- call VAR(cxx_name) // cxx_name(arg0, arg1, Thread*)
+ call CALLVAR(cxx_name) // cxx_name(arg0, arg1, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
END_MACRO
@@ -989,7 +1080,13 @@
// Load the class
movl 0(%rdx, %rdi, COMPRESSED_REFERENCE_SIZE), %edx
cmpl LITERAL(0), %gs:THREAD_IS_GC_MARKING_OFFSET
- jne .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path
+ jz .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit
+ // Null check so that we can load the lock word.
+ testl %edx, %edx
+ jz .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit
+ // Check the mark bit; if it is 1, return.
+ testl LITERAL(LOCK_WORD_MARK_BIT_MASK_SHIFTED), MIRROR_OBJECT_LOCK_WORD_OFFSET(%edx)
+ jz .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path
.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit:
ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_region_tlab_slow_path
.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path:
@@ -1022,7 +1119,7 @@
test LITERAL(LOCK_WORD_STATE_MASK), %ecx // Test the 2 high bits.
jne .Lslow_lock // Slow path if either of the two high bits are set.
movl %ecx, %edx // save lock word (edx) to keep read barrier bits.
- andl LITERAL(LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED), %ecx // zero the read barrier bits.
+ andl LITERAL(LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED), %ecx // zero the gc bits.
test %ecx, %ecx
jnz .Lalready_thin // Lock word contains a thin lock.
// unlocked case - edx: original lock word, edi: obj.
@@ -1037,9 +1134,9 @@
cmpw %cx, %dx // do we hold the lock already?
jne .Lslow_lock
movl %edx, %ecx // copy the lock word to check count overflow.
- andl LITERAL(LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED), %ecx // zero the read barrier bits.
+ andl LITERAL(LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED), %ecx // zero the gc bits.
addl LITERAL(LOCK_WORD_THIN_LOCK_COUNT_ONE), %ecx // increment recursion count
- test LITERAL(LOCK_WORD_READ_BARRIER_STATE_MASK), %ecx // overflowed if either of the upper two bits (28-29) are set
+ test LITERAL(LOCK_WORD_READ_BARRIER_STATE_MASK), %ecx // overflowed if the upper bit (28) is set
jne .Lslow_lock // count overflowed so go slow
movl %edx, %eax // copy the lock word as the old val for cmpxchg.
addl LITERAL(LOCK_WORD_THIN_LOCK_COUNT_ONE), %edx // increment recursion count again for real.
@@ -1074,12 +1171,12 @@
cmpw %cx, %dx // does the thread id match?
jne .Lslow_unlock
movl %ecx, %edx // copy the lock word to detect new count of 0.
- andl LITERAL(LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED), %edx // zero the read barrier bits.
+ andl LITERAL(LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED), %edx // zero the gc bits.
cmpl LITERAL(LOCK_WORD_THIN_LOCK_COUNT_ONE), %edx
jae .Lrecursive_thin_unlock
// update lockword, cmpxchg necessary for read barrier bits.
movl %ecx, %eax // eax: old lock word.
- andl LITERAL(LOCK_WORD_READ_BARRIER_STATE_MASK), %ecx // ecx: new lock word zero except original rb bits.
+ andl LITERAL(LOCK_WORD_GC_STATE_MASK_SHIFTED), %ecx // ecx: new lock word zero except original gc bits.
#ifndef USE_READ_BARRIER
movl %ecx, MIRROR_OBJECT_LOCK_WORD_OFFSET(%edi)
#else
@@ -1329,7 +1426,14 @@
ret
END_FUNCTION art_quick_memcpy
-NO_ARG_DOWNCALL art_quick_test_suspend, artTestSuspendFromCode, ret
+DEFINE_FUNCTION art_quick_test_suspend
+ SETUP_SAVE_EVERYTHING_CALLEE_SAVE_FRAME // save everything for GC
+ // Outgoing argument set up
+ movq %gs:THREAD_SELF_OFFSET, %rdi // pass Thread::Current()
+ call SYMBOL(artTestSuspendFromCode) // (Thread*)
+ RESTORE_SAVE_EVERYTHING_CALLEE_SAVE_FRAME // restore frame up to return address
+ ret
+END_FUNCTION art_quick_test_suspend
UNIMPLEMENTED art_quick_ldiv
UNIMPLEMENTED art_quick_lmod
@@ -1833,6 +1937,14 @@
// convention (e.g. standard callee-save registers are preserved).
MACRO2(READ_BARRIER_MARK_REG, name, reg)
DEFINE_FUNCTION VAR(name)
+ // Null check so that we can load the lock word.
+ testq REG_VAR(reg), REG_VAR(reg)
+ jz .Lret_rb_\name
+ // Check the mark bit; if it is 1, return.
+ testl LITERAL(LOCK_WORD_MARK_BIT_MASK_SHIFTED), MIRROR_OBJECT_LOCK_WORD_OFFSET(REG_VAR(reg))
+ jz .Lslow_rb_\name
+ ret
+.Lslow_rb_\name:
// Save all potentially live caller-save core registers.
PUSH rax
PUSH rcx
@@ -1897,6 +2009,7 @@
POP_REG_NE rdx, RAW_VAR(reg)
POP_REG_NE rcx, RAW_VAR(reg)
POP_REG_NE rax, RAW_VAR(reg)
+.Lret_rb_\name:
ret
END_FUNCTION VAR(name)
END_MACRO
diff --git a/runtime/arch/x86_64/quick_method_frame_info_x86_64.h b/runtime/arch/x86_64/quick_method_frame_info_x86_64.h
index 37eff83..aa75b56 100644
--- a/runtime/arch/x86_64/quick_method_frame_info_x86_64.h
+++ b/runtime/arch/x86_64/quick_method_frame_info_x86_64.h
@@ -25,12 +25,19 @@
namespace art {
namespace x86_64 {
+static constexpr uint32_t kX86_64CalleeSaveAlwaysSpills =
+ (1 << art::x86_64::kNumberOfCpuRegisters); // Fake return address callee save.
static constexpr uint32_t kX86_64CalleeSaveRefSpills =
(1 << art::x86_64::RBX) | (1 << art::x86_64::RBP) | (1 << art::x86_64::R12) |
(1 << art::x86_64::R13) | (1 << art::x86_64::R14) | (1 << art::x86_64::R15);
static constexpr uint32_t kX86_64CalleeSaveArgSpills =
(1 << art::x86_64::RSI) | (1 << art::x86_64::RDX) | (1 << art::x86_64::RCX) |
(1 << art::x86_64::R8) | (1 << art::x86_64::R9);
+static constexpr uint32_t kX86_64CalleeSaveEverythingSpills =
+ (1 << art::x86_64::RAX) | (1 << art::x86_64::RCX) | (1 << art::x86_64::RDX) |
+ (1 << art::x86_64::RSI) | (1 << art::x86_64::RDI) | (1 << art::x86_64::R8) |
+ (1 << art::x86_64::R9) | (1 << art::x86_64::R10) | (1 << art::x86_64::R11);
+
static constexpr uint32_t kX86_64CalleeSaveFpArgSpills =
(1 << art::x86_64::XMM0) | (1 << art::x86_64::XMM1) | (1 << art::x86_64::XMM2) |
(1 << art::x86_64::XMM3) | (1 << art::x86_64::XMM4) | (1 << art::x86_64::XMM5) |
@@ -38,16 +45,24 @@
static constexpr uint32_t kX86_64CalleeSaveFpSpills =
(1 << art::x86_64::XMM12) | (1 << art::x86_64::XMM13) |
(1 << art::x86_64::XMM14) | (1 << art::x86_64::XMM15);
+static constexpr uint32_t kX86_64CalleeSaveFpEverythingSpills =
+ (1 << art::x86_64::XMM0) | (1 << art::x86_64::XMM1) |
+ (1 << art::x86_64::XMM2) | (1 << art::x86_64::XMM3) |
+ (1 << art::x86_64::XMM4) | (1 << art::x86_64::XMM5) |
+ (1 << art::x86_64::XMM6) | (1 << art::x86_64::XMM7) |
+ (1 << art::x86_64::XMM8) | (1 << art::x86_64::XMM9) |
+ (1 << art::x86_64::XMM10) | (1 << art::x86_64::XMM11);
constexpr uint32_t X86_64CalleeSaveCoreSpills(Runtime::CalleeSaveType type) {
- return kX86_64CalleeSaveRefSpills |
+ return kX86_64CalleeSaveAlwaysSpills | kX86_64CalleeSaveRefSpills |
(type == Runtime::kRefsAndArgs ? kX86_64CalleeSaveArgSpills : 0) |
- (1 << art::x86_64::kNumberOfCpuRegisters); // fake return address callee save;
+ (type == Runtime::kSaveEverything ? kX86_64CalleeSaveEverythingSpills : 0);
}
constexpr uint32_t X86_64CalleeSaveFpSpills(Runtime::CalleeSaveType type) {
return kX86_64CalleeSaveFpSpills |
- (type == Runtime::kRefsAndArgs ? kX86_64CalleeSaveFpArgSpills : 0);
+ (type == Runtime::kRefsAndArgs ? kX86_64CalleeSaveFpArgSpills : 0) |
+ (type == Runtime::kSaveEverything ? kX86_64CalleeSaveFpEverythingSpills : 0);
}
constexpr uint32_t X86_64CalleeSaveFrameSize(Runtime::CalleeSaveType type) {
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 0619af8..d4cee44 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -20,6 +20,7 @@
#if defined(__cplusplus)
#include "art_method.h"
#include "gc/allocator/rosalloc.h"
+#include "gc/heap.h"
#include "jit/jit.h"
#include "lock_word.h"
#include "mirror/class.h"
@@ -174,10 +175,17 @@
#define MIRROR_CLASS_OBJECT_SIZE_OFFSET (100 + MIRROR_OBJECT_HEADER_SIZE)
ADD_TEST_EQ(MIRROR_CLASS_OBJECT_SIZE_OFFSET,
art::mirror::Class::ObjectSizeOffset().Int32Value())
+#define MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET (104 + MIRROR_OBJECT_HEADER_SIZE)
+ADD_TEST_EQ(MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET,
+ art::mirror::Class::PrimitiveTypeOffset().Int32Value())
#define MIRROR_CLASS_STATUS_OFFSET (112 + MIRROR_OBJECT_HEADER_SIZE)
ADD_TEST_EQ(MIRROR_CLASS_STATUS_OFFSET,
art::mirror::Class::StatusOffset().Int32Value())
+#define PRIMITIVE_TYPE_SIZE_SHIFT_SHIFT 16
+ADD_TEST_EQ(PRIMITIVE_TYPE_SIZE_SHIFT_SHIFT,
+ static_cast<int>(art::mirror::Class::kPrimitiveTypeSizeShiftShift))
+
// Array offsets.
#define MIRROR_ARRAY_LENGTH_OFFSET MIRROR_OBJECT_HEADER_SIZE
ADD_TEST_EQ(MIRROR_ARRAY_LENGTH_OFFSET, art::mirror::Array::LengthOffset().Int32Value())
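PRIMITIVE_TYPE_SIZE_SHIFT_SHIFT exposes how mirror::Class packs its primitive
type field: the component size shift sits in the upper half of the 32-bit word,
above the Primitive::Type value, so assembly can recover the component size with
one load and one shift. A sketch, assuming the packing (size_shift << 16) | type;
cls_primitive_type stands in for the field at MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET:

    #include <cstddef>
    #include <cstdint>

    size_t ComponentSize(uint32_t cls_primitive_type) {
      uint32_t size_shift = cls_primitive_type >> 16;  // PRIMITIVE_TYPE_SIZE_SHIFT_SHIFT
      return size_t{1} << size_shift;                  // e.g. shift 2 -> 4 bytes (int)
    }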
diff --git a/runtime/base/array_slice.h b/runtime/base/array_slice.h
index 19ad302..32283d0 100644
--- a/runtime/base/array_slice.h
+++ b/runtime/base/array_slice.h
@@ -129,6 +129,10 @@
return element_size_;
}
+ bool Contains(const T* element) const {
+ return &AtUnchecked(0) <= element && element < &AtUnchecked(size_);
+ }
+
private:
T& AtUnchecked(size_t index) {
return *reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(array_) + index * element_size_);
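Contains() is a half-open address-range test: an element pointer belongs to the
slice iff it lies between the first element and one past the last, with the
upper bound computed from the slice's element_size_ stride. A sketch of the same
test over a plain array (names illustrative):

    #include <cstddef>

    bool InRange(const int* begin, size_t size, const int* p) {
      return begin <= p && p < begin + size;  // [begin, end) membership
    }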
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index c79e287..d0dc886 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -87,7 +87,6 @@
kTracingUniqueMethodsLock,
kTracingStreamingLock,
kDeoptimizedMethodsLock,
- kJitCodeCacheLock,
kClassLoaderClassesLock,
kDefaultMutexLevel,
kMarkSweepLargeObjectLock,
@@ -98,6 +97,7 @@
kMonitorPoolLock,
kMethodVerifiersLock,
kClassLinkerClassesLock, // TODO rename.
+ kJitCodeCacheLock,
kBreakpointLock,
kMonitorLock,
kMonitorListLock,
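Raising kJitCodeCacheLock above kClassLinkerClassesLock changes what the debug
lock checker permits: LockLevel encodes a hierarchy where acquiring a mutex at a
level equal to or higher than one already held is forbidden, so after this move
code holding the JIT code cache lock may take the class linker classes lock, not
the other way around. A minimal sketch of that invariant (the real checking
lives in base/mutex.cc; LockChecker is illustrative):

    #include <cassert>
    #include <vector>

    struct LockChecker {
      std::vector<int> held_levels;
      void Acquire(int level) {
        for (int held : held_levels) {
          assert(level < held && "lock hierarchy violation");  // must descend
        }
        held_levels.push_back(level);
      }
    };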
diff --git a/runtime/base/unix_file/fd_file.cc b/runtime/base/unix_file/fd_file.cc
index 6f0e125..48e3ceb 100644
--- a/runtime/base/unix_file/fd_file.cc
+++ b/runtime/base/unix_file/fd_file.cc
@@ -132,14 +132,14 @@
}
bool FdFile::Open(const std::string& path, int flags, mode_t mode) {
+ static_assert(O_RDONLY == 0, "Readonly flag has unexpected value.");
CHECK_EQ(fd_, -1) << path;
- read_only_mode_ = (flags & O_RDONLY) != 0;
+ read_only_mode_ = ((flags & O_ACCMODE) == O_RDONLY);
fd_ = TEMP_FAILURE_RETRY(open(path.c_str(), flags, mode));
if (fd_ == -1) {
return false;
}
file_path_ = path;
- static_assert(O_RDONLY == 0, "Readonly flag has unexpected value.");
if (kCheckSafeUsage && (flags & (O_RDWR | O_CREAT | O_WRONLY)) != 0) {
// Start in the base state (not flushed, not closed).
guard_state_ = GuardState::kBase;
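The old test was a latent bug: POSIX defines O_RDONLY as 0, so
(flags & O_RDONLY) != 0 is false for every flags value and read_only_mode_ was
never set. The access mode lives in the low bits of flags and has to be masked
out with O_ACCMODE before comparing, which is also why the static_assert moved
up next to the check it justifies. A sketch:

    #include <cassert>
    #include <fcntl.h>

    int main() {
      int flags = O_RDONLY;                               // O_RDONLY == 0
      assert(((flags & O_RDONLY) != 0) == false);         // old check: always false
      assert((flags & O_ACCMODE) == O_RDONLY);            // fixed check
      return 0;
    }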
diff --git a/runtime/base/unix_file/fd_file_test.cc b/runtime/base/unix_file/fd_file_test.cc
index db3a44f..99ef6f7 100644
--- a/runtime/base/unix_file/fd_file_test.cc
+++ b/runtime/base/unix_file/fd_file_test.cc
@@ -53,12 +53,14 @@
ASSERT_TRUE(file.IsOpened());
EXPECT_GE(file.Fd(), 0);
EXPECT_TRUE(file.IsOpened());
+ EXPECT_FALSE(file.ReadOnlyMode());
EXPECT_EQ(0, file.Flush());
EXPECT_EQ(0, file.Close());
EXPECT_EQ(-1, file.Fd());
EXPECT_FALSE(file.IsOpened());
- FdFile file2(good_path, O_RDONLY, true);
+ FdFile file2(good_path, O_RDONLY, true);
EXPECT_TRUE(file2.IsOpened());
+ EXPECT_TRUE(file2.ReadOnlyMode());
EXPECT_GE(file2.Fd(), 0);
ASSERT_EQ(file2.Close(), 0);
@@ -70,6 +72,7 @@
art::ScratchFile tmp;
FdFile file(tmp.GetFilename(), O_RDONLY, false);
ASSERT_TRUE(file.IsOpened());
+ EXPECT_TRUE(file.ReadOnlyMode());
EXPECT_GE(file.Fd(), 0);
uint8_t buffer[16];
EXPECT_FALSE(file.ReadFully(&buffer, 4));
@@ -86,6 +89,7 @@
FdFile file(tmp.GetFilename(), O_RDWR, false);
ASSERT_TRUE(file.IsOpened());
EXPECT_GE(file.Fd(), 0);
+ EXPECT_FALSE(file.ReadOnlyMode());
char ignore_prefix[20] = {'a', };
NullTerminateCharArray(ignore_prefix);
@@ -114,6 +118,7 @@
FdFile file(tmp.GetFilename(), O_RDWR, false);
ASSERT_GE(file.Fd(), 0);
EXPECT_TRUE(file.IsOpened());
+ EXPECT_FALSE(file.ReadOnlyMode());
const char* test_string = "This is a test string";
size_t length = strlen(test_string) + 1;
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 3c64c81..46722ec 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1501,11 +1501,8 @@
SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(error_msg != nullptr);
std::unique_ptr<const DexFile> dex_file;
- const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(location, nullptr);
+ const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(location, nullptr, error_msg);
if (oat_dex_file == nullptr) {
- *error_msg = StringPrintf("Failed finding oat dex file for %s %s",
- oat_file->GetLocation().c_str(),
- location);
return std::unique_ptr<const DexFile>();
}
std::string inner_error_msg;
@@ -3563,32 +3560,40 @@
}
LOG(INFO) << "Loaded class " << descriptor << source;
}
- WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- mirror::ClassLoader* const class_loader = klass->GetClassLoader();
- ClassTable* const class_table = InsertClassTableForClassLoader(class_loader);
- mirror::Class* existing = class_table->Lookup(descriptor, hash);
- if (existing != nullptr) {
- return existing;
- }
- if (kIsDebugBuild &&
- !klass->IsTemp() &&
- class_loader == nullptr &&
- dex_cache_boot_image_class_lookup_required_) {
- // Check a class loaded with the system class loader matches one in the image if the class
- // is in the image.
- existing = LookupClassFromBootImage(descriptor);
+ {
+ WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+ mirror::ClassLoader* const class_loader = klass->GetClassLoader();
+ ClassTable* const class_table = InsertClassTableForClassLoader(class_loader);
+ mirror::Class* existing = class_table->Lookup(descriptor, hash);
if (existing != nullptr) {
- CHECK_EQ(klass, existing);
+ return existing;
+ }
+ if (kIsDebugBuild &&
+ !klass->IsTemp() &&
+ class_loader == nullptr &&
+ dex_cache_boot_image_class_lookup_required_) {
+ // Check a class loaded with the system class loader matches one in the image if the class
+ // is in the image.
+ existing = LookupClassFromBootImage(descriptor);
+ if (existing != nullptr) {
+ CHECK_EQ(klass, existing);
+ }
+ }
+ VerifyObject(klass);
+ class_table->InsertWithHash(klass, hash);
+ if (class_loader != nullptr) {
+ // This is necessary because we need to have the card dirtied for remembered sets.
+ Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
+ }
+ if (log_new_class_table_roots_) {
+ new_class_roots_.push_back(GcRoot<mirror::Class>(klass));
}
}
- VerifyObject(klass);
- class_table->InsertWithHash(klass, hash);
- if (class_loader != nullptr) {
- // This is necessary because we need to have the card dirtied for remembered sets.
- Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
- }
- if (log_new_class_table_roots_) {
- new_class_roots_.push_back(GcRoot<mirror::Class>(klass));
+ if (kIsDebugBuild) {
+ // Test that copied methods correctly can find their holder.
+ for (ArtMethod& method : klass->GetCopiedMethods(image_pointer_size_)) {
+ CHECK_EQ(GetHoldingClassOfCopiedMethod(&method), klass);
+ }
}
return nullptr;
}
@@ -8108,19 +8113,27 @@
void ClassLinker::CleanupClassLoaders() {
Thread* const self = Thread::Current();
- WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
- for (auto it = class_loaders_.begin(); it != class_loaders_.end(); ) {
- const ClassLoaderData& data = *it;
- // Need to use DecodeJObject so that we get null for cleared JNI weak globals.
- auto* const class_loader = down_cast<mirror::ClassLoader*>(self->DecodeJObject(data.weak_root));
- if (class_loader != nullptr) {
- ++it;
- } else {
- VLOG(class_linker) << "Freeing class loader";
- DeleteClassLoader(self, data);
- it = class_loaders_.erase(it);
+ std::vector<ClassLoaderData> to_delete;
+ // Do the delete outside the lock to avoid a lock violation in the JIT code cache.
+ {
+ WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
+ for (auto it = class_loaders_.begin(); it != class_loaders_.end(); ) {
+ const ClassLoaderData& data = *it;
+ // Need to use DecodeJObject so that we get null for cleared JNI weak globals.
+ auto* const class_loader =
+ down_cast<mirror::ClassLoader*>(self->DecodeJObject(data.weak_root));
+ if (class_loader != nullptr) {
+ ++it;
+ } else {
+ VLOG(class_linker) << "Freeing class loader";
+ to_delete.push_back(data);
+ it = class_loaders_.erase(it);
+ }
}
}
+ for (ClassLoaderData& data : to_delete) {
+ DeleteClassLoader(self, data);
+ }
}
std::set<DexCacheResolvedClasses> ClassLinker::GetResolvedClasses(bool ignore_boot_classes) {
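CleanupClassLoaders now collects dead entries into a vector while holding classlinker_classes_lock_ and runs DeleteClassLoader only after the lock is released. This matches the mutex.h hunk that raises kJitCodeCacheLock above kClassLinkerClassesLock: deleting a class loader removes its methods from the JIT code cache, which takes the code cache lock, and that acquisition is no longer legal while the class-linker lock is held. A generic sketch of the collect-then-delete pattern, using illustrative std types rather than ART's:

    #include <list>
    #include <mutex>
    #include <vector>

    // Erase dead items under the lock, destroy them after releasing it, so
    // the destructor may take locks that rank above this one.
    template <typename T, typename Pred, typename Deleter>
    void EraseThenDelete(std::mutex& lock, std::list<T>& items,
                         Pred is_dead, Deleter destroy) {
      std::vector<T> to_delete;
      {
        std::lock_guard<std::mutex> guard(lock);
        for (auto it = items.begin(); it != items.end();) {
          if (is_dead(*it)) {
            to_delete.push_back(*it);
            it = items.erase(it);
          } else {
            ++it;
          }
        }
      }
      for (T& item : to_delete) {
        destroy(item);
      }
    }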
@@ -8239,6 +8252,33 @@
return ret;
}
+class ClassLinker::FindVirtualMethodHolderVisitor : public ClassVisitor {
+ public:
+ FindVirtualMethodHolderVisitor(const ArtMethod* method, PointerSize pointer_size)
+ : method_(method),
+ pointer_size_(pointer_size) {}
+
+ bool operator()(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE {
+ if (klass->GetVirtualMethodsSliceUnchecked(pointer_size_).Contains(method_)) {
+ holder_ = klass;
+ }
+ // Return false to stop searching if holder_ is not null.
+ return holder_ == nullptr;
+ }
+
+ mirror::Class* holder_ = nullptr;
+ const ArtMethod* const method_;
+ const PointerSize pointer_size_;
+};
+
+mirror::Class* ClassLinker::GetHoldingClassOfCopiedMethod(ArtMethod* method) {
+ ScopedTrace trace(__FUNCTION__); // Since this function is slow, have a trace to notify people.
+ CHECK(method->IsCopied());
+ FindVirtualMethodHolderVisitor visitor(method, image_pointer_size_);
+ VisitClasses(&visitor);
+ return visitor.holder_;
+}
+
// Instantiate ResolveMethod.
template ArtMethod* ClassLinker::ResolveMethod<ClassLinker::kForceICCECheck>(
const DexFile& dex_file,
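GetHoldingClassOfCopiedMethod answers "which class actually holds this copied ArtMethod?" by visiting every loaded class and testing whether the method pointer falls inside that class's virtual-method slice; the visitor returns false to stop once a holder is found. A compact sketch of the same linear scan over stand-in types:

    #include <vector>

    // Stand-ins for ART's types; a class holds its methods contiguously.
    struct Method {};
    struct Class {
      const Method* begin = nullptr;
      const Method* end = nullptr;
      bool ContainsMethod(const Method* m) const {
        return begin <= m && m < end;
      }
    };

    // O(#loaded classes) per query, which is why the real code only calls
    // this for copied methods and wraps it in a ScopedTrace.
    const Class* FindHolder(const std::vector<Class>& classes, const Method* m) {
      for (const Class& klass : classes) {
        if (klass.ContainsMethod(m)) {
          return &klass;
        }
      }
      return nullptr;
    }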
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index fcc6b23..c3ab8c5 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -648,6 +648,10 @@
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
+ // Get the actual holding class for a copied method. Pretty slow, don't call often.
+ mirror::Class* GetHoldingClassOfCopiedMethod(ArtMethod* method)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
struct DexCacheData {
// Weak root to the DexCache. Note: Do not decode this unnecessarily or else class unloading may
// not work properly.
@@ -676,7 +680,6 @@
SHARED_REQUIRES(Locks::mutator_lock_);
static void DeleteClassLoader(Thread* self, const ClassLoaderData& data)
- REQUIRES(Locks::classlinker_classes_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
void VisitClassLoaders(ClassLoaderVisitor* visitor) const
@@ -1168,6 +1171,7 @@
// Image pointer size.
PointerSize image_pointer_size_;
+ class FindVirtualMethodHolderVisitor;
friend class ImageDumper; // for DexLock
friend class ImageWriter; // for GetClassRoots
friend class JniCompilerTest; // for GetRuntimeQuickGenericJniStub
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 9f3ff3f..2a5198b 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -1454,6 +1454,15 @@
}
}
+static size_t GetMethodNumArgRegistersIncludingThis(ArtMethod* method)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ uint32_t num_registers = ArtMethod::NumArgRegisters(method->GetShorty());
+ if (!method->IsStatic()) {
+ ++num_registers;
+ }
+ return num_registers;
+}
+
/*
* Circularly shifts registers so that arguments come last. Reverts
* slots to dex style argument placement.
@@ -1465,7 +1474,7 @@
// We should not get here for a method without code (native, proxy or abstract). Log it and
// return the slot as is since all registers are arguments.
LOG(WARNING) << "Trying to demangle slot for method without code " << PrettyMethod(m);
- uint16_t vreg_count = ArtMethod::NumArgRegisters(m->GetShorty());
+ uint16_t vreg_count = GetMethodNumArgRegistersIncludingThis(m);
if (slot < vreg_count) {
*error = JDWP::ERR_NONE;
return slot;
@@ -1637,8 +1646,7 @@
// arg_count considers doubles and longs to take 2 units.
// variable_count considers everything to take 1 unit.
- std::string shorty(m->GetShorty());
- expandBufAdd4BE(pReply, ArtMethod::NumArgRegisters(shorty));
+ expandBufAdd4BE(pReply, GetMethodNumArgRegistersIncludingThis(m));
// We don't know the total number of variables yet, so leave a blank and update it later.
size_t variable_count_offset = expandBufGetLength(pReply);
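The old call sites under-counted by one for instance methods: ArtMethod::NumArgRegisters only counts the registers implied by the shorty, while the implicit this pointer occupies one more. A sketch of the counting rule, assuming standard dex shorty semantics (the first character is the return type; J and D take two vregs):

    #include <cstdint>

    // Count dex argument vregs from a shorty, skipping the return type.
    uint32_t NumArgRegisters(const char* shorty) {
      uint32_t count = 0;
      for (const char* p = shorty + 1; *p != '\0'; ++p) {
        count += (*p == 'J' || *p == 'D') ? 2u : 1u;  // long/double = 2 vregs
      }
      return count;
    }

    uint32_t NumArgRegistersIncludingThis(const char* shorty, bool is_static) {
      return NumArgRegisters(shorty) + (is_static ? 0u : 1u);  // +1 for `this`
    }
    // Example: NumArgRegistersIncludingThis("VIJ", /*is_static=*/false) == 4.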
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index 061babd..a6eb5f6 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -338,6 +338,11 @@
*error_code = ZipOpenErrorCode::kEntryNotFound;
return nullptr;
}
+ if (zip_entry->GetUncompressedLength() == 0) {
+ *error_msg = StringPrintf("Dex file '%s' has zero length", location.c_str());
+ *error_code = ZipOpenErrorCode::kDexFileError;
+ return nullptr;
+ }
std::unique_ptr<MemMap> map(zip_entry->ExtractToMemMap(location.c_str(), entry_name, error_msg));
if (map.get() == nullptr) {
*error_msg = StringPrintf("Failed to extract '%s' from '%s': %s", entry_name, location.c_str(),
@@ -435,6 +440,8 @@
MemMap* mem_map,
const OatDexFile* oat_dex_file,
std::string* error_msg) {
+ DCHECK(base != nullptr);
+ DCHECK_NE(size, 0U);
CHECK_ALIGNED(base, 4); // various dex file structures must be word aligned
std::unique_ptr<DexFile> dex_file(
new DexFile(base, size, location, location_checksum, mem_map, oat_dex_file));
diff --git a/runtime/dex_file_test.cc b/runtime/dex_file_test.cc
index 616c2a0..2704d8a 100644
--- a/runtime/dex_file_test.cc
+++ b/runtime/dex_file_test.cc
@@ -166,6 +166,12 @@
"uAAAAAYAAAABAAAA0AAAAAEgAAACAAAA8AAAAAEQAAABAAAAHAEAAAIgAAAIAAAAIgEAAAMgAAAC"
"AAAAcwEAAAAgAAABAAAAfgEAAAAQAAABAAAAjAEAAA==";
+static const char kRawDexZeroLength[] =
+ "UEsDBAoAAAAAAOhxAkkAAAAAAAAAAAAAAAALABwAY2xhc3Nlcy5kZXhVVAkAA2QNoVdnDaFXdXgL"
+ "AAEE5AMBAASIEwAAUEsBAh4DCgAAAAAA6HECSQAAAAAAAAAAAAAAAAsAGAAAAAAAAAAAAKCBAAAA"
+ "AGNsYXNzZXMuZGV4VVQFAANkDaFXdXgLAAEE5AMBAASIEwAAUEsFBgAAAAABAAEAUQAAAEUAAAAA"
+ "AA==";
+
static void DecodeAndWriteDexFile(const char* base64, const char* location) {
// decode base64
CHECK(base64 != nullptr);
@@ -254,6 +260,18 @@
ASSERT_FALSE(DexFile::Open(location, location, kVerifyChecksum, &error_msg, &dex_files));
}
+TEST_F(DexFileTest, ZeroLengthDexRejected) {
+ ScratchFile tmp;
+ const char* location = tmp.GetFilename().c_str();
+ DecodeAndWriteDexFile(kRawDexZeroLength, location);
+
+ ScopedObjectAccess soa(Thread::Current());
+ static constexpr bool kVerifyChecksum = true;
+ std::string error_msg;
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ ASSERT_FALSE(DexFile::Open(location, location, kVerifyChecksum, &error_msg, &dex_files));
+}
+
TEST_F(DexFileTest, GetLocationChecksum) {
ScopedObjectAccess soa(Thread::Current());
std::unique_ptr<const DexFile> raw(OpenTestDexFile("Main"));
diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h
index 4019a5b..fb774a4 100644
--- a/runtime/gc/collector/concurrent_copying-inl.h
+++ b/runtime/gc/collector/concurrent_copying-inl.h
@@ -154,11 +154,30 @@
}
inline mirror::Object* ConcurrentCopying::MarkFromReadBarrier(mirror::Object* from_ref) {
+ mirror::Object* ret;
+ // TODO: Delete GetMarkBit check when all of the callers properly check the bit. Remaining caller
+ // is array allocations.
+ if (from_ref == nullptr || from_ref->GetMarkBit()) {
+ return from_ref;
+ }
// TODO: Consider removing this check when we are done investigating slow paths. b/30162165
if (UNLIKELY(mark_from_read_barrier_measurements_)) {
- return MarkFromReadBarrierWithMeasurements(from_ref);
+ ret = MarkFromReadBarrierWithMeasurements(from_ref);
+ } else {
+ ret = Mark(from_ref);
}
- return Mark(from_ref);
+ // Only set the mark bit for baker barrier.
+ if (kUseBakerReadBarrier && LIKELY(!rb_mark_bit_stack_full_ && ret->AtomicSetMarkBit(0, 1))) {
+ // If the mark stack is full, we may temporarily go to marked and back to unmarked. Seeing
+ // either value is OK since the only race is doing an unnecessary Mark.
+ if (!rb_mark_bit_stack_->AtomicPushBack(ret)) {
+ // Mark stack is full, set the bit back to zero.
+ CHECK(ret->AtomicSetMarkBit(1, 0));
+ // Set rb_mark_bit_stack_full_; this is racy but OK since AtomicPushBack is thread safe.
+ rb_mark_bit_stack_full_ = true;
+ }
+ }
+ return ret;
}
inline mirror::Object* ConcurrentCopying::GetFwdPtr(mirror::Object* from_ref) {
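The rewritten MarkFromReadBarrier adds a mark-bit fast path: if the object's mark bit is already set, the reference is returned untouched; otherwise the object is marked, the bit is set, and the object is recorded on a bounded stack so all bits can be cleared when marking finishes. If the push fails, the bit is reverted, leaving the object eligible for a harmless redundant Mark later. A sketch of the push-or-revert step with illustrative types:

    #include <atomic>

    struct Obj {
      std::atomic<int> mark_bit{0};
      bool AtomicSetMarkBit(int expected, int desired) {
        return mark_bit.compare_exchange_strong(expected, desired);
      }
    };

    // `stack` is any bounded stack with a thread-safe AtomicPushBack.
    template <typename Stack>
    void SetBitAndRecord(Obj* obj, Stack* stack, bool* stack_full) {
      if (!*stack_full && obj->AtomicSetMarkBit(/*expected=*/0, /*desired=*/1)) {
        if (!stack->AtomicPushBack(obj)) {
          // Full: revert the bit so it can still be cleared correctly, and
          // remember fullness. Racy but safe; worst case is an extra Mark().
          obj->AtomicSetMarkBit(1, 0);
          *stack_full = true;
        }
      }
    }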
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index d7221e4..071537d 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -42,9 +42,6 @@
namespace collector {
static constexpr size_t kDefaultGcMarkStackSize = 2 * MB;
-// If kGrayDirtyImmuneObjects is true then we gray dirty objects in the GC pause to prevent dirty
-// pages.
-static constexpr bool kGrayDirtyImmuneObjects = true;
// If kFilterModUnionCards then we attempt to filter cards that don't need to be dirty in the mod
// union table. Disabled since it does not seem to help the pause much.
static constexpr bool kFilterModUnionCards = kIsDebugBuild;
@@ -52,6 +49,9 @@
// ConcurrentCopying::Scan. May be used to diagnose possibly unnecessary read barriers.
// Only enabled for kIsDebugBuild to avoid performance hit.
static constexpr bool kDisallowReadBarrierDuringScan = kIsDebugBuild;
+// Slow path mark stack size; increase this if the stack is getting full and causing
+// performance problems.
+static constexpr size_t kReadBarrierMarkStackSize = 512 * KB;
ConcurrentCopying::ConcurrentCopying(Heap* heap,
const std::string& name_prefix,
@@ -63,6 +63,10 @@
gc_mark_stack_(accounting::ObjectStack::Create("concurrent copying gc mark stack",
kDefaultGcMarkStackSize,
kDefaultGcMarkStackSize)),
+ rb_mark_bit_stack_(accounting::ObjectStack::Create("rb copying gc mark stack",
+ kReadBarrierMarkStackSize,
+ kReadBarrierMarkStackSize)),
+ rb_mark_bit_stack_full_(false),
mark_stack_lock_("concurrent copying mark stack lock", kMarkSweepMarkStackLock),
thread_running_gc_(nullptr),
is_marking_(false), is_active_(false), is_asserting_to_space_invariant_(false),
@@ -187,6 +191,7 @@
CHECK(false_gray_stack_.empty());
}
+ rb_mark_bit_stack_full_ = false;
mark_from_read_barrier_measurements_ = measure_read_barrier_slow_path_;
if (measure_read_barrier_slow_path_) {
rb_slow_path_ns_.StoreRelaxed(0);
@@ -914,9 +919,9 @@
}
collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
if (kUseBakerReadBarrier) {
- CHECK(ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr())
+ CHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr())
<< "Ref " << ref << " " << PrettyTypeOf(ref)
- << " has non-white rb_ptr " << ref->GetReadBarrierPointer();
+ << " has non-white rb_ptr ";
}
}
@@ -982,7 +987,7 @@
VerifyNoFromSpaceRefsFieldVisitor visitor(collector);
obj->VisitReferences(visitor, visitor);
if (kUseBakerReadBarrier) {
- CHECK(obj->GetReadBarrierPointer() == ReadBarrier::WhitePtr())
+ CHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr())
<< "obj=" << obj << " non-white rb_ptr " << obj->GetReadBarrierPointer();
}
}
@@ -2243,6 +2248,15 @@
}
}
}
+ if (kUseBakerReadBarrier) {
+ TimingLogger::ScopedTiming split("EmptyRBMarkBitStack", GetTimings());
+ DCHECK(rb_mark_bit_stack_.get() != nullptr);
+ const auto* limit = rb_mark_bit_stack_->End();
+ for (StackReference<mirror::Object>* it = rb_mark_bit_stack_->Begin(); it != limit; ++it) {
+ CHECK(it->AsMirrorPtr()->AtomicSetMarkBit(1, 0));
+ }
+ rb_mark_bit_stack_->Reset();
+ }
}
if (measure_read_barrier_slow_path_) {
MutexLock mu(self, rb_slow_path_histogram_lock_);
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 72112fa..a862802 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -57,6 +57,9 @@
static constexpr bool kEnableFromSpaceAccountingCheck = kIsDebugBuild;
// Enable verbose mode.
static constexpr bool kVerboseMode = false;
+ // If kGrayDirtyImmuneObjects is true then we gray dirty objects in the GC pause to prevent dirty
+ // pages.
+ static constexpr bool kGrayDirtyImmuneObjects = true;
ConcurrentCopying(Heap* heap,
const std::string& name_prefix = "",
@@ -230,6 +233,8 @@
space::RegionSpace* region_space_; // The underlying region space.
std::unique_ptr<Barrier> gc_barrier_;
std::unique_ptr<accounting::ObjectStack> gc_mark_stack_;
+ std::unique_ptr<accounting::ObjectStack> rb_mark_bit_stack_;
+ bool rb_mark_bit_stack_full_;
std::vector<mirror::Object*> false_gray_stack_ GUARDED_BY(mark_stack_lock_);
Mutex mark_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
std::vector<accounting::ObjectStack*> revoked_mark_stacks_
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index a92cb24..88fbf78 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -257,6 +257,7 @@
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "Heap() entering";
}
+ CHECK_GE(large_object_threshold, kMinLargeObjectThreshold);
ScopedTrace trace(__FUNCTION__);
Runtime* const runtime = Runtime::Current();
// If we aren't the zygote, switch to the default non zygote allocator. This may update the
@@ -2538,6 +2539,17 @@
AddSpace(zygote_space_);
non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
AddSpace(non_moving_space_);
+ if (kUseBakerReadBarrier && gc::collector::ConcurrentCopying::kGrayDirtyImmuneObjects) {
+ // Treat all of the objects in the zygote as marked to avoid unnecessary dirty pages. This is
+ // safe since we mark all of the objects that may reference non immune objects as gray.
+ zygote_space_->GetLiveBitmap()->VisitMarkedRange(
+ reinterpret_cast<uintptr_t>(zygote_space_->Begin()),
+ reinterpret_cast<uintptr_t>(zygote_space_->Limit()),
+ [](mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
+ CHECK(obj->AtomicSetMarkBit(0, 1));
+ });
+ }
+
// Create the zygote space mod union table.
accounting::ModUnionTable* mod_union_table =
new accounting::ModUnionTableCardCache("zygote space mod-union table", this,
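With Baker read barriers and kGrayDirtyImmuneObjects enabled, every live object in the freshly created zygote space gets its mark bit set up front, so the collector can treat the whole space as marked without dirtying its pages later. The walk uses the space's live bitmap: visit each set bit in a range and map it back to an object address. A sketch of that kind of bitmap walk, simplified to one bit per alignment unit (ART's bitmap has the same object-alignment granularity but different bookkeeping):

    #include <cstdint>
    #include <vector>

    // Visit every set bit; bit i in word w corresponds to the object at
    // heap_begin + (w * 64 + i) * alignment. Illustrative, not ART's bitmap.
    void VisitMarkedRange(const std::vector<uint64_t>& words,
                          uintptr_t heap_begin, size_t alignment,
                          void (*visit)(uintptr_t obj_addr)) {
      for (size_t w = 0; w < words.size(); ++w) {
        uint64_t bits = words[w];
        while (bits != 0) {
          const int bit = __builtin_ctzll(bits);  // index of lowest set bit
          visit(heap_begin + (w * 64 + bit) * alignment);
          bits &= bits - 1;  // clear lowest set bit
        }
      }
    }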
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index bb0d11a..be8ed40 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -132,7 +132,8 @@
static constexpr double kDefaultTargetUtilization = 0.5;
static constexpr double kDefaultHeapGrowthMultiplier = 2.0;
// Primitive arrays larger than this size are put in the large object space.
- static constexpr size_t kDefaultLargeObjectThreshold = 3 * kPageSize;
+ static constexpr size_t kMinLargeObjectThreshold = 3 * kPageSize;
+ static constexpr size_t kDefaultLargeObjectThreshold = kMinLargeObjectThreshold;
// Whether or not parallel GC is enabled. If not, then we never create the thread pool.
static constexpr bool kDefaultEnableParallelGC = false;
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index d140b75..8ade185 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -1436,6 +1436,8 @@
image_header->GetImageMethod(ImageHeader::kRefsOnlySaveMethod));
CHECK_EQ(runtime->GetCalleeSaveMethod(Runtime::kRefsAndArgs),
image_header->GetImageMethod(ImageHeader::kRefsAndArgsSaveMethod));
+ CHECK_EQ(runtime->GetCalleeSaveMethod(Runtime::kSaveEverything),
+ image_header->GetImageMethod(ImageHeader::kSaveEverythingMethod));
} else if (!runtime->HasResolutionMethod()) {
runtime->SetInstructionSet(space->oat_file_non_owned_->GetOatHeader().GetInstructionSet());
runtime->SetResolutionMethod(image_header->GetImageMethod(ImageHeader::kResolutionMethod));
@@ -1448,6 +1450,8 @@
image_header->GetImageMethod(ImageHeader::kRefsOnlySaveMethod), Runtime::kRefsOnly);
runtime->SetCalleeSaveMethod(
image_header->GetImageMethod(ImageHeader::kRefsAndArgsSaveMethod), Runtime::kRefsAndArgs);
+ runtime->SetCalleeSaveMethod(
+ image_header->GetImageMethod(ImageHeader::kSaveEverythingMethod), Runtime::kSaveEverything);
}
VLOG(image) << "ImageSpace::Init exiting " << *space.get();
diff --git a/runtime/gc_root.h b/runtime/gc_root.h
index 3734bcc..0304d0d 100644
--- a/runtime/gc_root.h
+++ b/runtime/gc_root.h
@@ -195,7 +195,8 @@
return root_.IsNull();
}
- ALWAYS_INLINE GcRoot(MirrorType* ref = nullptr) SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE GcRoot() {}
+ explicit ALWAYS_INLINE GcRoot(MirrorType* ref) SHARED_REQUIRES(Locks::mutator_lock_);
private:
// Root visitors take pointers to root_ and place them in CompressedReference** arrays. We use a
diff --git a/runtime/generated/asm_support_gen.h b/runtime/generated/asm_support_gen.h
index 5d62b59..3d3cc4e 100644
--- a/runtime/generated/asm_support_gen.h
+++ b/runtime/generated/asm_support_gen.h
@@ -32,6 +32,8 @@
DEFINE_CHECK_EQ(static_cast<size_t>(RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET), (static_cast<size_t>(art::Runtime::GetCalleeSaveMethodOffset(art::Runtime:: kRefsOnly))))
#define RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET 0x10
DEFINE_CHECK_EQ(static_cast<size_t>(RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET), (static_cast<size_t>(art::Runtime::GetCalleeSaveMethodOffset(art::Runtime:: kRefsAndArgs))))
+#define RUNTIME_SAVE_EVERYTHING_CALLEE_SAVE_FRAME_OFFSET 0x18
+DEFINE_CHECK_EQ(static_cast<size_t>(RUNTIME_SAVE_EVERYTHING_CALLEE_SAVE_FRAME_OFFSET), (static_cast<size_t>(art::Runtime::GetCalleeSaveMethodOffset(art::Runtime:: kSaveEverything))))
#define THREAD_FLAGS_OFFSET 0
DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_FLAGS_OFFSET), (static_cast<int32_t>(art::Thread:: ThreadFlagsOffset<art::kRuntimePointerSize>().Int32Value())))
#define THREAD_ID_OFFSET 12
@@ -68,18 +70,30 @@
DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_QUICK_CODE_OFFSET_32), (static_cast<int32_t>(art::ArtMethod:: EntryPointFromQuickCompiledCodeOffset(art::PointerSize::k32).Int32Value())))
#define ART_METHOD_QUICK_CODE_OFFSET_64 48
DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_QUICK_CODE_OFFSET_64), (static_cast<int32_t>(art::ArtMethod:: EntryPointFromQuickCompiledCodeOffset(art::PointerSize::k64).Int32Value())))
+#define MIN_LARGE_OBJECT_THRESHOLD 0x3000
+DEFINE_CHECK_EQ(static_cast<size_t>(MIN_LARGE_OBJECT_THRESHOLD), (static_cast<size_t>(art::gc::Heap::kMinLargeObjectThreshold)))
#define LOCK_WORD_STATE_SHIFT 30
DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_STATE_SHIFT), (static_cast<int32_t>(art::LockWord::kStateShift)))
#define LOCK_WORD_STATE_MASK 0xc0000000
DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_STATE_MASK), (static_cast<uint32_t>(art::LockWord::kStateMaskShifted)))
#define LOCK_WORD_READ_BARRIER_STATE_SHIFT 28
DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_READ_BARRIER_STATE_SHIFT), (static_cast<int32_t>(art::LockWord::kReadBarrierStateShift)))
-#define LOCK_WORD_READ_BARRIER_STATE_MASK 0x30000000
+#define LOCK_WORD_READ_BARRIER_STATE_MASK 0x10000000
DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_READ_BARRIER_STATE_MASK), (static_cast<uint32_t>(art::LockWord::kReadBarrierStateMaskShifted)))
-#define LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED 0xcfffffff
+#define LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED 0xefffffff
DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED), (static_cast<uint32_t>(art::LockWord::kReadBarrierStateMaskShiftedToggled)))
#define LOCK_WORD_THIN_LOCK_COUNT_ONE 65536
DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_THIN_LOCK_COUNT_ONE), (static_cast<int32_t>(art::LockWord::kThinLockCountOne)))
+#define LOCK_WORD_GC_STATE_MASK_SHIFTED 0x30000000
+DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_GC_STATE_MASK_SHIFTED), (static_cast<uint32_t>(art::LockWord::kGCStateMaskShifted)))
+#define LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED 0xcfffffff
+DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED), (static_cast<uint32_t>(art::LockWord::kGCStateMaskShiftedToggled)))
+#define LOCK_WORD_GC_STATE_SHIFT 28
+DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_GC_STATE_SHIFT), (static_cast<int32_t>(art::LockWord::kGCStateShift)))
+#define LOCK_WORD_MARK_BIT_SHIFT 29
+DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_MARK_BIT_SHIFT), (static_cast<int32_t>(art::LockWord::kMarkBitStateShift)))
+#define LOCK_WORD_MARK_BIT_MASK_SHIFTED 0x20000000
+DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_MARK_BIT_MASK_SHIFTED), (static_cast<uint32_t>(art::LockWord::kMarkBitStateMaskShifted)))
#define OBJECT_ALIGNMENT_MASK 0x7
DEFINE_CHECK_EQ(static_cast<size_t>(OBJECT_ALIGNMENT_MASK), (static_cast<size_t>(art::kObjectAlignment - 1)))
#define OBJECT_ALIGNMENT_MASK_TOGGLED 0xfffffff8
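The regenerated constants reflect the lock-word split visible at the end of this patch: the old two-bit read barrier state becomes a one-bit read barrier state (bit 28) plus a one-bit mark bit (bit 29), and the pair together is the GC state. The masks follow mechanically from the shifts; a sketch that rederives them:

    #include <cstdint>

    constexpr int kReadBarrierStateShift = 28;  // 1 bit
    constexpr int kMarkBitShift = 29;           // 1 bit
    constexpr uint32_t kReadBarrierStateMask = 1u << kReadBarrierStateShift;
    constexpr uint32_t kMarkBitMask = 1u << kMarkBitShift;
    constexpr uint32_t kGCStateMask = kReadBarrierStateMask | kMarkBitMask;

    static_assert(kReadBarrierStateMask == 0x10000000u, "rb state mask");
    static_assert(kMarkBitMask == 0x20000000u, "mark bit mask");
    static_assert(kGCStateMask == 0x30000000u, "gc state mask");
    static_assert(~kGCStateMask == 0xcfffffffu, "gc state mask toggled");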
diff --git a/runtime/globals.h b/runtime/globals.h
index 0b44c47..9045d40 100644
--- a/runtime/globals.h
+++ b/runtime/globals.h
@@ -47,7 +47,8 @@
}
// Required object alignment
-static constexpr size_t kObjectAlignment = 8;
+static constexpr size_t kObjectAlignmentShift = 3;
+static constexpr size_t kObjectAlignment = 1u << kObjectAlignmentShift;
static constexpr size_t kLargeObjectAlignment = kPageSize;
// Whether or not this is a debug build. Useful in conditionals where NDEBUG isn't.
diff --git a/runtime/image.h b/runtime/image.h
index a98cea1..207a818 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -186,6 +186,7 @@
kCalleeSaveMethod,
kRefsOnlySaveMethod,
kRefsAndArgsSaveMethod,
+ kSaveEverythingMethod,
kImageMethodsCount, // Number of elements in enum.
};
diff --git a/runtime/interpreter/mterp/arm/binopLit8.S b/runtime/interpreter/mterp/arm/binopLit8.S
index b8f0d92..7c9c631 100644
--- a/runtime/interpreter/mterp/arm/binopLit8.S
+++ b/runtime/interpreter/mterp/arm/binopLit8.S
@@ -1,10 +1,14 @@
-%default {"preinstr":"", "result":"r0", "chkzero":"0"}
+%default {"extract":"asr r1, r3, #8", "result":"r0", "chkzero":"0"}
/*
* Generic 32-bit "lit8" binary operation. Provide an "instr" line
* that specifies an instruction that performs "result = r0 op r1".
* This could be an ARM instruction or a function call. (If the result
* comes back in a register other than r0, you can override "result".)
*
+ * You can override "extract" if the extraction of the literal value
+ * from r3 to r1 is not the default "asr r1, r3, #8". The extraction
+ * can be omitted completely if the shift is embedded in "instr".
+ *
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (r1). Useful for integer division and modulus.
*
@@ -17,14 +21,13 @@
mov r9, rINST, lsr #8 @ r9<- AA
and r2, r3, #255 @ r2<- BB
GET_VREG r0, r2 @ r0<- vBB
- movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ $extract @ optional; typically r1<- ssssssCC (sign extended)
.if $chkzero
@cmp r1, #0 @ is second operand zero?
beq common_errDivideByZero
.endif
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- $preinstr @ optional op; may set condition codes
$instr @ $result<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
SET_VREG $result, r9 @ vAA<- $result
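The lit8 formats put the signed literal CC in the high byte of the second code unit and the source register BB in the low byte, so the default extract is a single arithmetic shift. Shift opcodes mask the amount to 5 bits instead (the ubfx variants below), and plain ALU ops drop the extract entirely by folding the shift into the instruction's second operand (e.g. add r0, r0, r3, asr #8). The same decoding in C++, as a sketch:

    #include <cstdint>

    // Second code unit of a lit8 op: ssssssCC|BB, where CC is a signed byte.
    int32_t ExtractLit8(uint16_t unit) {
      return static_cast<int16_t>(unit) >> 8;  // "asr r1, r3, #8"
    }

    uint32_t ExtractVRegBB(uint16_t unit) {
      return unit & 0xffu;  // "and r2, r3, #255"
    }

    uint32_t ExtractShiftAmount(uint16_t unit) {
      return (unit >> 8) & 31u;  // "ubfx r1, r3, #8, #5": only 5 bits matter
    }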
diff --git a/runtime/interpreter/mterp/arm/op_add_int_lit8.S b/runtime/interpreter/mterp/arm/op_add_int_lit8.S
index b84684a..035510d 100644
--- a/runtime/interpreter/mterp/arm/op_add_int_lit8.S
+++ b/runtime/interpreter/mterp/arm/op_add_int_lit8.S
@@ -1 +1 @@
-%include "arm/binopLit8.S" {"instr":"add r0, r0, r1"}
+%include "arm/binopLit8.S" {"extract":"", "instr":"add r0, r0, r3, asr #8"}
diff --git a/runtime/interpreter/mterp/arm/op_and_int_lit8.S b/runtime/interpreter/mterp/arm/op_and_int_lit8.S
index d5783e5..af746b5 100644
--- a/runtime/interpreter/mterp/arm/op_and_int_lit8.S
+++ b/runtime/interpreter/mterp/arm/op_and_int_lit8.S
@@ -1 +1 @@
-%include "arm/binopLit8.S" {"instr":"and r0, r0, r1"}
+%include "arm/binopLit8.S" {"extract":"", "instr":"and r0, r0, r3, asr #8"}
diff --git a/runtime/interpreter/mterp/arm/op_or_int_lit8.S b/runtime/interpreter/mterp/arm/op_or_int_lit8.S
index 2d85038..9882bfc 100644
--- a/runtime/interpreter/mterp/arm/op_or_int_lit8.S
+++ b/runtime/interpreter/mterp/arm/op_or_int_lit8.S
@@ -1 +1 @@
-%include "arm/binopLit8.S" {"instr":"orr r0, r0, r1"}
+%include "arm/binopLit8.S" {"extract":"", "instr":"orr r0, r0, r3, asr #8"}
diff --git a/runtime/interpreter/mterp/arm/op_rsub_int_lit8.S b/runtime/interpreter/mterp/arm/op_rsub_int_lit8.S
index 2ee11e1..dc953dc 100644
--- a/runtime/interpreter/mterp/arm/op_rsub_int_lit8.S
+++ b/runtime/interpreter/mterp/arm/op_rsub_int_lit8.S
@@ -1 +1 @@
-%include "arm/binopLit8.S" {"instr":"rsb r0, r0, r1"}
+%include "arm/binopLit8.S" {"extract":"", "instr":"rsb r0, r0, r3, asr #8"}
diff --git a/runtime/interpreter/mterp/arm/op_shl_int_lit8.S b/runtime/interpreter/mterp/arm/op_shl_int_lit8.S
index 6a48bfc..60a1498 100644
--- a/runtime/interpreter/mterp/arm/op_shl_int_lit8.S
+++ b/runtime/interpreter/mterp/arm/op_shl_int_lit8.S
@@ -1 +1 @@
-%include "arm/binopLit8.S" {"preinstr":"and r1, r1, #31", "instr":"mov r0, r0, asl r1"}
+%include "arm/binopLit8.S" {"extract":"ubfx r1, r3, #8, #5", "instr":"mov r0, r0, asl r1"}
diff --git a/runtime/interpreter/mterp/arm/op_shr_int_lit8.S b/runtime/interpreter/mterp/arm/op_shr_int_lit8.S
index 60fe5fc..c2f6cb0 100644
--- a/runtime/interpreter/mterp/arm/op_shr_int_lit8.S
+++ b/runtime/interpreter/mterp/arm/op_shr_int_lit8.S
@@ -1 +1 @@
-%include "arm/binopLit8.S" {"preinstr":"and r1, r1, #31", "instr":"mov r0, r0, asr r1"}
+%include "arm/binopLit8.S" {"extract":"ubfx r1, r3, #8, #5", "instr":"mov r0, r0, asr r1"}
diff --git a/runtime/interpreter/mterp/arm/op_ushr_int_lit8.S b/runtime/interpreter/mterp/arm/op_ushr_int_lit8.S
index 40a4435..5554eb0 100644
--- a/runtime/interpreter/mterp/arm/op_ushr_int_lit8.S
+++ b/runtime/interpreter/mterp/arm/op_ushr_int_lit8.S
@@ -1 +1 @@
-%include "arm/binopLit8.S" {"preinstr":"and r1, r1, #31", "instr":"mov r0, r0, lsr r1"}
+%include "arm/binopLit8.S" {"extract":"ubfx r1, r3, #8, #5", "instr":"mov r0, r0, lsr r1"}
diff --git a/runtime/interpreter/mterp/arm/op_xor_int_lit8.S b/runtime/interpreter/mterp/arm/op_xor_int_lit8.S
index 46bb712..97d0b9e 100644
--- a/runtime/interpreter/mterp/arm/op_xor_int_lit8.S
+++ b/runtime/interpreter/mterp/arm/op_xor_int_lit8.S
@@ -1 +1 @@
-%include "arm/binopLit8.S" {"instr":"eor r0, r0, r1"}
+%include "arm/binopLit8.S" {"extract":"", "instr":"eor r0, r0, r3, asr #8"}
diff --git a/runtime/interpreter/mterp/arm64/binopLit8.S b/runtime/interpreter/mterp/arm64/binopLit8.S
index 0b7c68a..dfa3169 100644
--- a/runtime/interpreter/mterp/arm64/binopLit8.S
+++ b/runtime/interpreter/mterp/arm64/binopLit8.S
@@ -1,10 +1,14 @@
-%default {"preinstr":"", "result":"w0", "chkzero":"0"}
+%default {"extract": "asr w1, w3, #8", "preinstr":"", "result":"w0", "chkzero":"0"}
/*
* Generic 32-bit "lit8" binary operation. Provide an "instr" line
* that specifies an instruction that performs "result = w0 op w1".
* This could be an ARM instruction or a function call. (If the result
* comes back in a register other than w0, you can override "result".)
*
+ * You can override "extract" if the extraction of the literal value
+ * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
+ * can be omitted completely if the shift is embedded in "instr".
+ *
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (w1). Useful for integer division and modulus.
*
@@ -17,7 +21,7 @@
lsr w9, wINST, #8 // w9<- AA
and w2, w3, #255 // w2<- BB
GET_VREG w0, w2 // w0<- vBB
- asr w1, w3, #8 // w1<- ssssssCC (sign extended)
+ $extract // optional; typically w1<- ssssssCC (sign extended)
.if $chkzero
cbz w1, common_errDivideByZero
.endif
diff --git a/runtime/interpreter/mterp/arm64/op_add_int_lit8.S b/runtime/interpreter/mterp/arm64/op_add_int_lit8.S
index 196ea99..2dfb8b9 100644
--- a/runtime/interpreter/mterp/arm64/op_add_int_lit8.S
+++ b/runtime/interpreter/mterp/arm64/op_add_int_lit8.S
@@ -1 +1 @@
-%include "arm64/binopLit8.S" {"instr":"add w0, w0, w1"}
+%include "arm64/binopLit8.S" {"extract":"", "instr":"add w0, w0, w3, asr #8"}
diff --git a/runtime/interpreter/mterp/arm64/op_and_int_lit8.S b/runtime/interpreter/mterp/arm64/op_and_int_lit8.S
index 167b40e..495b5cd 100644
--- a/runtime/interpreter/mterp/arm64/op_and_int_lit8.S
+++ b/runtime/interpreter/mterp/arm64/op_and_int_lit8.S
@@ -1 +1 @@
-%include "arm64/binopLit8.S" {"instr":"and w0, w0, w1"}
+%include "arm64/binopLit8.S" {"extract":"", "instr":"and w0, w0, w3, asr #8"}
diff --git a/runtime/interpreter/mterp/arm64/op_or_int_lit8.S b/runtime/interpreter/mterp/arm64/op_or_int_lit8.S
index 51675f8..7cb26b7 100644
--- a/runtime/interpreter/mterp/arm64/op_or_int_lit8.S
+++ b/runtime/interpreter/mterp/arm64/op_or_int_lit8.S
@@ -1 +1 @@
-%include "arm64/binopLit8.S" {"instr":"orr w0, w0, w1"}
+%include "arm64/binopLit8.S" {"extract":"", "instr":"orr w0, w0, w3, asr #8"}
diff --git a/runtime/interpreter/mterp/arm64/op_shl_int_lit8.S b/runtime/interpreter/mterp/arm64/op_shl_int_lit8.S
index 17f57f9..9c19b55 100644
--- a/runtime/interpreter/mterp/arm64/op_shl_int_lit8.S
+++ b/runtime/interpreter/mterp/arm64/op_shl_int_lit8.S
@@ -1 +1 @@
-%include "arm64/binopLit8.S" {"instr":"lsl w0, w0, w1"}
+%include "arm64/binopLit8.S" {"extract":"ubfx w1, w3, #8, #5", "instr":"lsl w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_shr_int_lit8.S b/runtime/interpreter/mterp/arm64/op_shr_int_lit8.S
index 274080c..c7b61df 100644
--- a/runtime/interpreter/mterp/arm64/op_shr_int_lit8.S
+++ b/runtime/interpreter/mterp/arm64/op_shr_int_lit8.S
@@ -1 +1 @@
-%include "arm64/binopLit8.S" {"instr":"asr w0, w0, w1"}
+%include "arm64/binopLit8.S" {"extract":"ubfx w1, w3, #8, #5", "instr":"asr w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_ushr_int_lit8.S b/runtime/interpreter/mterp/arm64/op_ushr_int_lit8.S
index ff30e1f..555ed4e 100644
--- a/runtime/interpreter/mterp/arm64/op_ushr_int_lit8.S
+++ b/runtime/interpreter/mterp/arm64/op_ushr_int_lit8.S
@@ -1 +1 @@
-%include "arm64/binopLit8.S" {"instr":"lsr w0, w0, w1"}
+%include "arm64/binopLit8.S" {"extract":"ubfx w1, w3, #8, #5", "instr":"lsr w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_xor_int_lit8.S b/runtime/interpreter/mterp/arm64/op_xor_int_lit8.S
index 6d187b5..1d3d93e 100644
--- a/runtime/interpreter/mterp/arm64/op_xor_int_lit8.S
+++ b/runtime/interpreter/mterp/arm64/op_xor_int_lit8.S
@@ -1 +1 @@
-%include "arm64/binopLit8.S" {"instr":"eor w0, w0, w1"}
+%include "arm64/binopLit8.S" {"extract":"", "instr":"eor w0, w0, w3, asr #8"}
diff --git a/runtime/interpreter/mterp/out/mterp_arm.S b/runtime/interpreter/mterp/out/mterp_arm.S
index 1bcdd76..c33df6d 100644
--- a/runtime/interpreter/mterp/out/mterp_arm.S
+++ b/runtime/interpreter/mterp/out/mterp_arm.S
@@ -6473,6 +6473,10 @@
* This could be an ARM instruction or a function call. (If the result
* comes back in a register other than r0, you can override "result".)
*
+ * You can override "extract" if the extraction of the literal value
+ * from r3 to r1 is not the default "asr r1, r3, #8". The extraction
+ * can be omitted completely if the shift is embedded in "instr".
+ *
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (r1). Useful for integer division and modulus.
*
@@ -6485,15 +6489,14 @@
mov r9, rINST, lsr #8 @ r9<- AA
and r2, r3, #255 @ r2<- BB
GET_VREG r0, r2 @ r0<- vBB
- movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ @ optional; typically r1<- ssssssCC (sign extended)
.if 0
@cmp r1, #0 @ is second operand zero?
beq common_errDivideByZero
.endif
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- @ optional op; may set condition codes
- add r0, r0, r1 @ r0<- op, r0-r3 changed
+ add r0, r0, r3, asr #8 @ r0<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
SET_VREG r0, r9 @ vAA<- r0
GOTO_OPCODE ip @ jump to next instruction
@@ -6511,6 +6514,10 @@
* This could be an ARM instruction or a function call. (If the result
* comes back in a register other than r0, you can override "result".)
*
+ * You can override "extract" if the extraction of the literal value
+ * from r3 to r1 is not the default "asr r1, r3, #8". The extraction
+ * can be omitted completely if the shift is embedded in "instr".
+ *
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (r1). Useful for integer division and modulus.
*
@@ -6523,15 +6530,14 @@
mov r9, rINST, lsr #8 @ r9<- AA
and r2, r3, #255 @ r2<- BB
GET_VREG r0, r2 @ r0<- vBB
- movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ @ optional; typically r1<- ssssssCC (sign extended)
.if 0
@cmp r1, #0 @ is second operand zero?
beq common_errDivideByZero
.endif
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- @ optional op; may set condition codes
- rsb r0, r0, r1 @ r0<- op, r0-r3 changed
+ rsb r0, r0, r3, asr #8 @ r0<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
SET_VREG r0, r9 @ vAA<- r0
GOTO_OPCODE ip @ jump to next instruction
@@ -6550,6 +6556,10 @@
* This could be an ARM instruction or a function call. (If the result
* comes back in a register other than r0, you can override "result".)
*
+ * You can override "extract" if the extraction of the literal value
+ * from r3 to r1 is not the default "asr r1, r3, #8". The extraction
+ * can be omitted completely if the shift is embedded in "instr".
+ *
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (r1). Useful for integer division and modulus.
*
@@ -6562,14 +6572,13 @@
mov r9, rINST, lsr #8 @ r9<- AA
and r2, r3, #255 @ r2<- BB
GET_VREG r0, r2 @ r0<- vBB
- movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ asr r1, r3, #8 @ optional; typically r1<- ssssssCC (sign extended)
.if 0
@cmp r1, #0 @ is second operand zero?
beq common_errDivideByZero
.endif
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- @ optional op; may set condition codes
mul r0, r1, r0 @ r0<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
SET_VREG r0, r9 @ vAA<- r0
@@ -6657,6 +6666,10 @@
* This could be an ARM instruction or a function call. (If the result
* comes back in a register other than r0, you can override "result".)
*
+ * You can override "extract" if the extraction of the literal value
+ * from r3 to r1 is not the default "asr r1, r3, #8". The extraction
+ * can be omitted completely if the shift is embedded in "instr".
+ *
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (r1). Useful for integer division and modulus.
*
@@ -6669,15 +6682,14 @@
mov r9, rINST, lsr #8 @ r9<- AA
and r2, r3, #255 @ r2<- BB
GET_VREG r0, r2 @ r0<- vBB
- movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ @ optional; typically r1<- ssssssCC (sign extended)
.if 0
@cmp r1, #0 @ is second operand zero?
beq common_errDivideByZero
.endif
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- @ optional op; may set condition codes
- and r0, r0, r1 @ r0<- op, r0-r3 changed
+ and r0, r0, r3, asr #8 @ r0<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
SET_VREG r0, r9 @ vAA<- r0
GOTO_OPCODE ip @ jump to next instruction
@@ -6695,6 +6707,10 @@
* This could be an ARM instruction or a function call. (If the result
* comes back in a register other than r0, you can override "result".)
*
+ * You can override "extract" if the extraction of the literal value
+ * from r3 to r1 is not the default "asr r1, r3, #8". The extraction
+ * can be omitted completely if the shift is embedded in "instr".
+ *
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (r1). Useful for integer division and modulus.
*
@@ -6707,15 +6723,14 @@
mov r9, rINST, lsr #8 @ r9<- AA
and r2, r3, #255 @ r2<- BB
GET_VREG r0, r2 @ r0<- vBB
- movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ @ optional; typically r1<- ssssssCC (sign extended)
.if 0
@cmp r1, #0 @ is second operand zero?
beq common_errDivideByZero
.endif
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- @ optional op; may set condition codes
- orr r0, r0, r1 @ r0<- op, r0-r3 changed
+ orr r0, r0, r3, asr #8 @ r0<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
SET_VREG r0, r9 @ vAA<- r0
GOTO_OPCODE ip @ jump to next instruction
@@ -6733,6 +6748,10 @@
* This could be an ARM instruction or a function call. (If the result
* comes back in a register other than r0, you can override "result".)
*
+ * You can override "extract" if the extraction of the literal value
+ * from r3 to r1 is not the default "asr r1, r3, #8". The extraction
+ * can be omitted completely if the shift is embedded in "instr".
+ *
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (r1). Useful for integer division and modulus.
*
@@ -6745,15 +6764,14 @@
mov r9, rINST, lsr #8 @ r9<- AA
and r2, r3, #255 @ r2<- BB
GET_VREG r0, r2 @ r0<- vBB
- movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ @ optional; typically r1<- ssssssCC (sign extended)
.if 0
@cmp r1, #0 @ is second operand zero?
beq common_errDivideByZero
.endif
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- @ optional op; may set condition codes
- eor r0, r0, r1 @ r0<- op, r0-r3 changed
+ eor r0, r0, r3, asr #8 @ r0<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
SET_VREG r0, r9 @ vAA<- r0
GOTO_OPCODE ip @ jump to next instruction
@@ -6771,6 +6789,10 @@
* This could be an ARM instruction or a function call. (If the result
* comes back in a register other than r0, you can override "result".)
*
+ * You can override "extract" if the extraction of the literal value
+ * from r3 to r1 is not the default "asr r1, r3, #8". The extraction
+ * can be omitted completely if the shift is embedded in "instr".
+ *
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (r1). Useful for integer division and modulus.
*
@@ -6783,14 +6805,13 @@
mov r9, rINST, lsr #8 @ r9<- AA
and r2, r3, #255 @ r2<- BB
GET_VREG r0, r2 @ r0<- vBB
- movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ ubfx r1, r3, #8, #5 @ optional; typically r1<- ssssssCC (sign extended)
.if 0
@cmp r1, #0 @ is second operand zero?
beq common_errDivideByZero
.endif
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- and r1, r1, #31 @ optional op; may set condition codes
mov r0, r0, asl r1 @ r0<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
SET_VREG r0, r9 @ vAA<- r0
@@ -6809,6 +6830,10 @@
* This could be an ARM instruction or a function call. (If the result
* comes back in a register other than r0, you can override "result".)
*
+ * You can override "extract" if the extraction of the literal value
+ * from r3 to r1 is not the default "asr r1, r3, #8". The extraction
+ * can be omitted completely if the shift is embedded in "instr".
+ *
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (r1). Useful for integer division and modulus.
*
@@ -6821,14 +6846,13 @@
mov r9, rINST, lsr #8 @ r9<- AA
and r2, r3, #255 @ r2<- BB
GET_VREG r0, r2 @ r0<- vBB
- movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ ubfx r1, r3, #8, #5 @ optional; typically r1<- ssssssCC (sign extended)
.if 0
@cmp r1, #0 @ is second operand zero?
beq common_errDivideByZero
.endif
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- and r1, r1, #31 @ optional op; may set condition codes
mov r0, r0, asr r1 @ r0<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
SET_VREG r0, r9 @ vAA<- r0
@@ -6847,6 +6871,10 @@
* This could be an ARM instruction or a function call. (If the result
* comes back in a register other than r0, you can override "result".)
*
+ * You can override "extract" if the extraction of the literal value
+ * from r3 to r1 is not the default "asr r1, r3, #8". The extraction
+ * can be omitted completely if the shift is embedded in "instr".
+ *
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (r1). Useful for integer division and modulus.
*
@@ -6859,14 +6887,13 @@
mov r9, rINST, lsr #8 @ r9<- AA
and r2, r3, #255 @ r2<- BB
GET_VREG r0, r2 @ r0<- vBB
- movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ ubfx r1, r3, #8, #5 @ optional; typically r1<- ssssssCC (sign extended)
.if 0
@cmp r1, #0 @ is second operand zero?
beq common_errDivideByZero
.endif
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- and r1, r1, #31 @ optional op; may set condition codes
mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
SET_VREG r0, r9 @ vAA<- r0
diff --git a/runtime/interpreter/mterp/out/mterp_arm64.S b/runtime/interpreter/mterp/out/mterp_arm64.S
index 136bf20..c7303b9 100644
--- a/runtime/interpreter/mterp/out/mterp_arm64.S
+++ b/runtime/interpreter/mterp/out/mterp_arm64.S
@@ -6044,6 +6044,10 @@
* This could be an ARM instruction or a function call. (If the result
* comes back in a register other than w0, you can override "result".)
*
+ * You can override "extract" if the extraction of the literal value
+ * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
+ * can be omitted completely if the shift is embedded in "instr".
+ *
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (w1). Useful for integer division and modulus.
*
@@ -6056,13 +6060,13 @@
lsr w9, wINST, #8 // w9<- AA
and w2, w3, #255 // w2<- BB
GET_VREG w0, w2 // w0<- vBB
- asr w1, w3, #8 // w1<- ssssssCC (sign extended)
+ // optional; typically w1<- ssssssCC (sign extended)
.if 0
cbz w1, common_errDivideByZero
.endif
FETCH_ADVANCE_INST 2 // advance rPC, load rINST
// optional op; may set condition codes
- add w0, w0, w1 // w0<- op, w0-w3 changed
+ add w0, w0, w3, asr #8 // w0<- op, w0-w3 changed
GET_INST_OPCODE ip // extract opcode from rINST
SET_VREG w0, w9 // vAA<- w0
GOTO_OPCODE ip // jump to next instruction
@@ -6080,6 +6084,10 @@
* This could be an ARM instruction or a function call. (If the result
* comes back in a register other than w0, you can override "result".)
*
+ * You can override "extract" if the extraction of the literal value
+ * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
+ * can be omitted completely if the shift is embedded in "instr".
+ *
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (w1). Useful for integer division and modulus.
*
@@ -6092,7 +6100,7 @@
lsr w9, wINST, #8 // w9<- AA
and w2, w3, #255 // w2<- BB
GET_VREG w0, w2 // w0<- vBB
- asr w1, w3, #8 // w1<- ssssssCC (sign extended)
+ asr w1, w3, #8 // optional; typically w1<- ssssssCC (sign extended)
.if 0
cbz w1, common_errDivideByZero
.endif
@@ -6117,6 +6125,10 @@
* This could be an ARM instruction or a function call. (If the result
* comes back in a register other than w0, you can override "result".)
*
+ * You can override "extract" if the extraction of the literal value
+ * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
+ * can be omitted completely if the shift is embedded in "instr".
+ *
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (w1). Useful for integer division and modulus.
*
@@ -6129,7 +6141,7 @@
lsr w9, wINST, #8 // w9<- AA
and w2, w3, #255 // w2<- BB
GET_VREG w0, w2 // w0<- vBB
- asr w1, w3, #8 // w1<- ssssssCC (sign extended)
+ asr w1, w3, #8 // optional; typically w1<- ssssssCC (sign extended)
.if 0
cbz w1, common_errDivideByZero
.endif
@@ -6153,6 +6165,10 @@
* This could be an ARM instruction or a function call. (If the result
* comes back in a register other than w0, you can override "result".)
*
+ * You can override "extract" if the extraction of the literal value
+ * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
+ * can be omitted completely if the shift is embedded in "instr".
+ *
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (w1). Useful for integer division and modulus.
*
@@ -6165,7 +6181,7 @@
lsr w9, wINST, #8 // w9<- AA
and w2, w3, #255 // w2<- BB
GET_VREG w0, w2 // w0<- vBB
- asr w1, w3, #8 // w1<- ssssssCC (sign extended)
+ asr w1, w3, #8 // optional; typically w1<- ssssssCC (sign extended)
.if 1
cbz w1, common_errDivideByZero
.endif
@@ -6189,6 +6205,10 @@
* This could be an ARM instruction or a function call. (If the result
* comes back in a register other than w0, you can override "result".)
*
+ * You can override "extract" if the extraction of the literal value
+ * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
+ * can be omitted completely if the shift is embedded in "instr".
+ *
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (w1). Useful for integer division and modulus.
*
@@ -6201,7 +6221,7 @@
lsr w9, wINST, #8 // w9<- AA
and w2, w3, #255 // w2<- BB
GET_VREG w0, w2 // w0<- vBB
- asr w1, w3, #8 // w1<- ssssssCC (sign extended)
+ asr w1, w3, #8 // optional; typically w1<- ssssssCC (sign extended)
.if 1
cbz w1, common_errDivideByZero
.endif
@@ -6225,6 +6245,10 @@
* This could be an ARM instruction or a function call. (If the result
* comes back in a register other than w0, you can override "result".)
*
+ * You can override "extract" if the extraction of the literal value
+ * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
+ * can be omitted completely if the shift is embedded in "instr".
+ *
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (w1). Useful for integer division and modulus.
*
@@ -6237,13 +6261,13 @@
lsr w9, wINST, #8 // w9<- AA
and w2, w3, #255 // w2<- BB
GET_VREG w0, w2 // w0<- vBB
- asr w1, w3, #8 // w1<- ssssssCC (sign extended)
+ // optional; typically w1<- ssssssCC (sign extended)
.if 0
cbz w1, common_errDivideByZero
.endif
FETCH_ADVANCE_INST 2 // advance rPC, load rINST
// optional op; may set condition codes
- and w0, w0, w1 // w0<- op, w0-w3 changed
+ and w0, w0, w3, asr #8 // w0<- op, w0-w3 changed
GET_INST_OPCODE ip // extract opcode from rINST
SET_VREG w0, w9 // vAA<- w0
GOTO_OPCODE ip // jump to next instruction
@@ -6261,6 +6285,10 @@
* This could be an ARM instruction or a function call. (If the result
* comes back in a register other than w0, you can override "result".)
*
+ * You can override "extract" if the extraction of the literal value
+ * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
+ * can be omitted completely if the shift is embedded in "instr".
+ *
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (w1). Useful for integer division and modulus.
*
@@ -6273,13 +6301,13 @@
lsr w9, wINST, #8 // w9<- AA
and w2, w3, #255 // w2<- BB
GET_VREG w0, w2 // w0<- vBB
- asr w1, w3, #8 // w1<- ssssssCC (sign extended)
+ // optional; typically w1<- ssssssCC (sign extended)
.if 0
cbz w1, common_errDivideByZero
.endif
FETCH_ADVANCE_INST 2 // advance rPC, load rINST
// optional op; may set condition codes
- orr w0, w0, w1 // w0<- op, w0-w3 changed
+ orr w0, w0, w3, asr #8 // w0<- op, w0-w3 changed
GET_INST_OPCODE ip // extract opcode from rINST
SET_VREG w0, w9 // vAA<- w0
GOTO_OPCODE ip // jump to next instruction
@@ -6297,6 +6325,10 @@
* This could be an ARM instruction or a function call. (If the result
* comes back in a register other than w0, you can override "result".)
*
+ * You can override "extract" if the extraction of the literal value
+ * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
+ * can be omitted completely if the shift is embedded in "instr".
+ *
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (w1). Useful for integer division and modulus.
*
@@ -6309,13 +6341,13 @@
lsr w9, wINST, #8 // w9<- AA
and w2, w3, #255 // w2<- BB
GET_VREG w0, w2 // w0<- vBB
- asr w1, w3, #8 // w1<- ssssssCC (sign extended)
+ // optional; typically w1<- ssssssCC (sign extended)
.if 0
cbz w1, common_errDivideByZero
.endif
FETCH_ADVANCE_INST 2 // advance rPC, load rINST
// optional op; may set condition codes
- eor w0, w0, w1 // w0<- op, w0-w3 changed
+ eor w0, w0, w3, asr #8 // w0<- op, w0-w3 changed
GET_INST_OPCODE ip // extract opcode from rINST
SET_VREG w0, w9 // vAA<- w0
GOTO_OPCODE ip // jump to next instruction
@@ -6333,6 +6365,10 @@
* This could be an ARM instruction or a function call. (If the result
* comes back in a register other than w0, you can override "result".)
*
+ * You can override "extract" if the extraction of the literal value
+ * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
+ * can be omitted completely if the shift is embedded in "instr".
+ *
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (w1). Useful for integer division and modulus.
*
@@ -6345,7 +6381,7 @@
lsr w9, wINST, #8 // w9<- AA
and w2, w3, #255 // w2<- BB
GET_VREG w0, w2 // w0<- vBB
- asr w1, w3, #8 // w1<- ssssssCC (sign extended)
+ ubfx w1, w3, #8, #5 // optional; typically w1<- ssssssCC (sign extended)
.if 0
cbz w1, common_errDivideByZero
.endif
@@ -6369,6 +6405,10 @@
* This could be an ARM instruction or a function call. (If the result
* comes back in a register other than w0, you can override "result".)
*
+ * You can override "extract" if the extraction of the literal value
+ * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
+ * can be omitted completely if the shift is embedded in "instr".
+ *
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (w1). Useful for integer division and modulus.
*
@@ -6381,7 +6421,7 @@
lsr w9, wINST, #8 // w9<- AA
and w2, w3, #255 // w2<- BB
GET_VREG w0, w2 // w0<- vBB
- asr w1, w3, #8 // w1<- ssssssCC (sign extended)
+ ubfx w1, w3, #8, #5 // optional; typically w1<- ssssssCC (sign extended)
.if 0
cbz w1, common_errDivideByZero
.endif
@@ -6405,6 +6445,10 @@
* This could be an ARM instruction or a function call. (If the result
* comes back in a register other than w0, you can override "result".)
*
+ * You can override "extract" if the extraction of the literal value
+ * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
+ * can be omitted completely if the shift is embedded in "instr".
+ *
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (w1). Useful for integer division and modulus.
*
@@ -6417,7 +6461,7 @@
lsr w9, wINST, #8 // w9<- AA
and w2, w3, #255 // w2<- BB
GET_VREG w0, w2 // w0<- vBB
- asr w1, w3, #8 // w1<- ssssssCC (sign extended)
+ ubfx w1, w3, #8, #5 // optional; typically w1<- ssssssCC (sign extended)
.if 0
cbz w1, common_errDivideByZero
.endif
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index d52030f..cff2354 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -692,9 +692,6 @@
DCHECK(this_object != nullptr);
ProfilingInfo* info = caller->GetProfilingInfo(kRuntimePointerSize);
if (info != nullptr) {
- // Since the instrumentation is marked from the declaring class we need to mark the card so
- // that mod-union tables and card rescanning know about the update.
- Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(caller->GetDeclaringClass());
info->AddInvokeInfo(dex_pc, this_object->GetClass());
}
}
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 6dc1578..1938221 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -146,7 +146,6 @@
// Remove all methods in our cache that were allocated by 'alloc'.
void RemoveMethodsIn(Thread* self, const LinearAlloc& alloc)
REQUIRES(!lock_)
- REQUIRES(Locks::classlinker_classes_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
void ClearGcRootsInInlineCaches(Thread* self) REQUIRES(!lock_);
diff --git a/runtime/jit/profiling_info.cc b/runtime/jit/profiling_info.cc
index 07c8051..216df2f 100644
--- a/runtime/jit/profiling_info.cc
+++ b/runtime/jit/profiling_info.cc
@@ -25,10 +25,33 @@
namespace art {
+ProfilingInfo::ProfilingInfo(ArtMethod* method, const std::vector<uint32_t>& entries)
+ : number_of_inline_caches_(entries.size()),
+ method_(method),
+ is_method_being_compiled_(false),
+ is_osr_method_being_compiled_(false),
+ current_inline_uses_(0),
+ saved_entry_point_(nullptr) {
+ memset(&cache_, 0, number_of_inline_caches_ * sizeof(InlineCache));
+ for (size_t i = 0; i < number_of_inline_caches_; ++i) {
+ cache_[i].dex_pc_ = entries[i];
+ }
+ if (method->IsCopied()) {
+ // GetHoldingClassOfCopiedMethod is expensive, but creating a profiling info for a copied method
+ // appears to happen very rarely in practice.
+ holding_class_ = GcRoot<mirror::Class>(
+ Runtime::Current()->GetClassLinker()->GetHoldingClassOfCopiedMethod(method));
+ } else {
+ holding_class_ = GcRoot<mirror::Class>(method->GetDeclaringClass());
+ }
+ DCHECK(!holding_class_.IsNull());
+}
+
bool ProfilingInfo::Create(Thread* self, ArtMethod* method, bool retry_allocation) {
// Walk over the dex instructions of the method and keep track of
// instructions we are interested in profiling.
DCHECK(!method->IsNative());
+
const DexFile::CodeItem& code_item = *method->GetCodeItem();
const uint16_t* code_ptr = code_item.insns_;
const uint16_t* code_end = code_item.insns_ + code_item.insns_size_in_code_units_;
@@ -93,6 +116,14 @@
--i;
} else {
// We successfully set `cls`, just return.
+ // Since the instrumentation is marked from the declaring class we need to mark the card so
+ // that mod-union tables and card rescanning know about the update.
+ // Note that the declaring class is not necessarily the holding class if the method is
+ // copied. We need the card mark to be in the holding class since that is from where we
+ // will visit the profiling info.
+ if (!holding_class_.IsNull()) {
+ Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(holding_class_.Read());
+ }
return;
}
}
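
With this change the card mark happens next to the inline cache update and is keyed on the holding class rather than the declaring class, which differ for copied methods. As a rough sketch of what such a card-table write barrier does (the card size and dirty value below are assumptions for illustration; ART's real constants live in gc/accounting/card_table.h):

    #include <cstdint>

    constexpr size_t kCardShift = 7;      // assuming 128-byte cards
    constexpr uint8_t kCardDirty = 0x70;  // assumed dirty-card marker value

    inline void MarkCard(uint8_t* card_table, const void* holding_class) {
      // Dirty the card covering the object so mod-union tables and card
      // rescanning revisit its fields, including the profiling info roots.
      card_table[reinterpret_cast<uintptr_t>(holding_class) >> kCardShift] = kCardDirty;
    }
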
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index d04d2de..a890fbb 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -105,6 +105,7 @@
// NO_THREAD_SAFETY_ANALYSIS since we don't know what the callback requires.
template<typename RootVisitorType>
void VisitRoots(RootVisitorType& visitor) NO_THREAD_SAFETY_ANALYSIS {
+ visitor.VisitRootIfNonNull(holding_class_.AddressWithoutBarrier());
for (size_t i = 0; i < number_of_inline_caches_; ++i) {
InlineCache* cache = &cache_[i];
for (size_t j = 0; j < InlineCache::kIndividualCacheSize; ++j) {
@@ -166,18 +167,7 @@
}
private:
- ProfilingInfo(ArtMethod* method, const std::vector<uint32_t>& entries)
- : number_of_inline_caches_(entries.size()),
- method_(method),
- is_method_being_compiled_(false),
- is_osr_method_being_compiled_(false),
- current_inline_uses_(0),
- saved_entry_point_(nullptr) {
- memset(&cache_, 0, number_of_inline_caches_ * sizeof(InlineCache));
- for (size_t i = 0; i < number_of_inline_caches_; ++i) {
- cache_[i].dex_pc_ = entries[i];
- }
- }
+ ProfilingInfo(ArtMethod* method, const std::vector<uint32_t>& entries);
// Number of instructions we are profiling in the ArtMethod.
const uint32_t number_of_inline_caches_;
@@ -185,6 +175,9 @@
// Method this profiling info is for.
ArtMethod* const method_;
+ // Holding class for the method in case the method is a copied method.
+ GcRoot<mirror::Class> holding_class_;
+
// Whether the ArtMethod is currently being compiled. This flag
// is implicitly guarded by the JIT code cache lock.
// TODO: Make the JIT code cache lock global.
diff --git a/runtime/lock_word-inl.h b/runtime/lock_word-inl.h
index 341501b..4a2a293 100644
--- a/runtime/lock_word-inl.h
+++ b/runtime/lock_word-inl.h
@@ -43,17 +43,15 @@
inline size_t LockWord::ForwardingAddress() const {
DCHECK_EQ(GetState(), kForwardingAddress);
- return value_ << kStateSize;
+ return value_ << kForwardingAddressShift;
}
inline LockWord::LockWord() : value_(0) {
DCHECK_EQ(GetState(), kUnlocked);
}
-inline LockWord::LockWord(Monitor* mon, uint32_t rb_state)
- : value_(mon->GetMonitorId() | (rb_state << kReadBarrierStateShift) |
- (kStateFat << kStateShift)) {
- DCHECK_EQ(rb_state & ~kReadBarrierStateMask, 0U);
+inline LockWord::LockWord(Monitor* mon, uint32_t gc_state)
+ : value_(mon->GetMonitorId() | (gc_state << kGCStateShift) | (kStateFat << kStateShift)) {
#ifndef __LP64__
DCHECK_ALIGNED(mon, kMonitorIdAlignment);
#endif
diff --git a/runtime/lock_word.h b/runtime/lock_word.h
index 5d0d204..538b6eb 100644
--- a/runtime/lock_word.h
+++ b/runtime/lock_word.h
@@ -35,27 +35,27 @@
* the state. The four possible states are fat locked, thin/unlocked, hash code, and forwarding
* address. When the lock word is in the "thin" state and its bits are formatted as follows:
*
- * |33|22|222222221111|1111110000000000|
- * |10|98|765432109876|5432109876543210|
- * |00|rb| lock count |thread id owner |
+ * |33|2|2|222222221111|1111110000000000|
+ * |10|9|8|765432109876|5432109876543210|
+ * |00|m|r| lock count |thread id owner |
*
* When the lock word is in the "fat" state and its bits are formatted as follows:
*
- * |33|22|2222222211111111110000000000|
- * |10|98|7654321098765432109876543210|
- * |01|rb| MonitorId |
+ * |33|2|2|2222222211111111110000000000|
+ * |10|9|8|7654321098765432109876543210|
+ * |01|m|r| MonitorId |
*
* When the lock word is in hash state and its bits are formatted as follows:
*
- * |33|22|2222222211111111110000000000|
- * |10|98|7654321098765432109876543210|
- * |10|rb| HashCode |
+ * |33|2|2|2222222211111111110000000000|
+ * |10|9|8|7654321098765432109876543210|
+ * |10|m|r| HashCode |
*
- * When the lock word is in fowarding address state and its bits are formatted as follows:
+ * When the lock word is in forwarding address state and its bits are formatted as follows:
*
- * |33|22|2222222211111111110000000000|
- * |10|98|7654321098765432109876543210|
- * |11| ForwardingAddress |
+ * |33|2|22222222211111111110000000000|
+ * |10|9|87654321098765432109876543210|
+ * |11|0| ForwardingAddress |
*
 * The r bit stores the read barrier state and the m bit stores the mark bit state.
*/
@@ -64,11 +64,13 @@
enum SizeShiftsAndMasks { // private marker to avoid generate-operator-out.py from processing.
// Number of bits to encode the state, currently just fat or thin/unlocked or hash code.
kStateSize = 2,
- kReadBarrierStateSize = 2,
+ kReadBarrierStateSize = 1,
+ kMarkBitStateSize = 1,
// Number of bits to encode the thin lock owner.
kThinLockOwnerSize = 16,
// Remaining bits are the recursive lock count.
- kThinLockCountSize = 32 - kThinLockOwnerSize - kStateSize - kReadBarrierStateSize,
+ kThinLockCountSize = 32 - kThinLockOwnerSize - kStateSize - kReadBarrierStateSize -
+ kMarkBitStateSize,
// Thin lock bits. Owner in lowest bits.
kThinLockOwnerShift = 0,
@@ -81,25 +83,43 @@
kThinLockCountOne = 1 << kThinLockCountShift, // == 65536 (0x10000)
// State in the highest bits.
- kStateShift = kReadBarrierStateSize + kThinLockCountSize + kThinLockCountShift,
+ kStateShift = kReadBarrierStateSize + kThinLockCountSize + kThinLockCountShift +
+ kMarkBitStateSize,
kStateMask = (1 << kStateSize) - 1,
kStateMaskShifted = kStateMask << kStateShift,
kStateThinOrUnlocked = 0,
kStateFat = 1,
kStateHash = 2,
kStateForwardingAddress = 3,
+
+ // Read barrier bit.
kReadBarrierStateShift = kThinLockCountSize + kThinLockCountShift,
kReadBarrierStateMask = (1 << kReadBarrierStateSize) - 1,
kReadBarrierStateMaskShifted = kReadBarrierStateMask << kReadBarrierStateShift,
kReadBarrierStateMaskShiftedToggled = ~kReadBarrierStateMaskShifted,
+ // Mark bit.
+ kMarkBitStateShift = kReadBarrierStateSize + kReadBarrierStateShift,
+ kMarkBitStateMask = (1 << kMarkBitStateSize) - 1,
+ kMarkBitStateMaskShifted = kMarkBitStateMask << kMarkBitStateShift,
+ kMarkBitStateMaskShiftedToggled = ~kMarkBitStateMaskShifted,
+
+ // GC state is mark bit and read barrier state.
+ kGCStateSize = kReadBarrierStateSize + kMarkBitStateSize,
+ kGCStateShift = kReadBarrierStateShift,
+ kGCStateMaskShifted = kReadBarrierStateMaskShifted | kMarkBitStateMaskShifted,
+ kGCStateMaskShiftedToggled = ~kGCStateMaskShifted,
+
// When the state is kHashCode, the non-state bits hold the hashcode.
// Note Object.hashCode() has the hash code layout hardcoded.
kHashShift = 0,
- kHashSize = 32 - kStateSize - kReadBarrierStateSize,
+ kHashSize = 32 - kStateSize - kReadBarrierStateSize - kMarkBitStateSize,
kHashMask = (1 << kHashSize) - 1,
kMaxHash = kHashMask,
+ // Forwarding address shift.
+ kForwardingAddressShift = kObjectAlignmentShift,
+
kMonitorIdShift = kHashShift,
kMonitorIdSize = kHashSize,
kMonitorIdMask = kHashMask,
@@ -108,31 +128,31 @@
kMaxMonitorId = kMaxHash
};
- static LockWord FromThinLockId(uint32_t thread_id, uint32_t count, uint32_t rb_state) {
+ static LockWord FromThinLockId(uint32_t thread_id, uint32_t count, uint32_t gc_state) {
CHECK_LE(thread_id, static_cast<uint32_t>(kThinLockMaxOwner));
CHECK_LE(count, static_cast<uint32_t>(kThinLockMaxCount));
- DCHECK_EQ(rb_state & ~kReadBarrierStateMask, 0U);
- return LockWord((thread_id << kThinLockOwnerShift) | (count << kThinLockCountShift) |
- (rb_state << kReadBarrierStateShift) |
+ // DCHECK_EQ(gc_bits & kGCStateMaskToggled, 0U);
+ return LockWord((thread_id << kThinLockOwnerShift) |
+ (count << kThinLockCountShift) |
+ (gc_state << kGCStateShift) |
(kStateThinOrUnlocked << kStateShift));
}
static LockWord FromForwardingAddress(size_t target) {
DCHECK_ALIGNED(target, (1 << kStateSize));
- return LockWord((target >> kStateSize) | (kStateForwardingAddress << kStateShift));
+ return LockWord((target >> kForwardingAddressShift) | (kStateForwardingAddress << kStateShift));
}
- static LockWord FromHashCode(uint32_t hash_code, uint32_t rb_state) {
+ static LockWord FromHashCode(uint32_t hash_code, uint32_t gc_state) {
CHECK_LE(hash_code, static_cast<uint32_t>(kMaxHash));
- DCHECK_EQ(rb_state & ~kReadBarrierStateMask, 0U);
+ // DCHECK_EQ(gc_bits & kGCStateMaskToggled, 0U);
return LockWord((hash_code << kHashShift) |
- (rb_state << kReadBarrierStateShift) |
+ (gc_state << kGCStateShift) |
(kStateHash << kStateShift));
}
- static LockWord FromDefault(uint32_t rb_state) {
- DCHECK_EQ(rb_state & ~kReadBarrierStateMask, 0U);
- return LockWord(rb_state << kReadBarrierStateShift);
+ static LockWord FromDefault(uint32_t gc_state) {
+ return LockWord(gc_state << kGCStateShift);
}
static bool IsDefault(LockWord lw) {
@@ -154,7 +174,7 @@
LockState GetState() const {
CheckReadBarrierState();
if ((!kUseReadBarrier && UNLIKELY(value_ == 0)) ||
- (kUseReadBarrier && UNLIKELY((value_ & kReadBarrierStateMaskShiftedToggled) == 0))) {
+ (kUseReadBarrier && UNLIKELY((value_ & kGCStateMaskShiftedToggled) == 0))) {
return kUnlocked;
} else {
uint32_t internal_state = (value_ >> kStateShift) & kStateMask;
@@ -176,6 +196,10 @@
return (value_ >> kReadBarrierStateShift) & kReadBarrierStateMask;
}
+ uint32_t GCState() const {
+ return (value_ & kGCStateMaskShifted) >> kGCStateShift;
+ }
+
void SetReadBarrierState(uint32_t rb_state) {
DCHECK_EQ(rb_state & ~kReadBarrierStateMask, 0U);
DCHECK_NE(static_cast<uint32_t>(GetState()), static_cast<uint32_t>(kForwardingAddress));
@@ -184,6 +208,19 @@
value_ |= (rb_state & kReadBarrierStateMask) << kReadBarrierStateShift;
}
+
+ uint32_t MarkBitState() const {
+ return (value_ >> kMarkBitStateShift) & kMarkBitStateMask;
+ }
+
+ void SetMarkBitState(uint32_t mark_bit) {
+ DCHECK_EQ(mark_bit & ~kMarkBitStateMask, 0U);
+ DCHECK_NE(static_cast<uint32_t>(GetState()), static_cast<uint32_t>(kForwardingAddress));
+ // Clear and or the bits.
+ value_ &= kMarkBitStateMaskShiftedToggled;
+ value_ |= mark_bit << kMarkBitStateShift;
+ }
+
// Return the owner thin lock thread id.
uint32_t ThinLockOwner() const;
@@ -197,7 +234,7 @@
size_t ForwardingAddress() const;
// Construct a lock word for inflation to use a Monitor.
- LockWord(Monitor* mon, uint32_t rb_state);
+ LockWord(Monitor* mon, uint32_t gc_state);
// Return the hash code stored in the lock word, must be kHashCode state.
int32_t GetHashCode() const;
@@ -207,7 +244,7 @@
if (kIncludeReadBarrierState) {
return lw1.GetValue() == lw2.GetValue();
}
- return lw1.GetValueWithoutReadBarrierState() == lw2.GetValueWithoutReadBarrierState();
+ return lw1.GetValueWithoutGCState() == lw2.GetValueWithoutGCState();
}
void Dump(std::ostream& os) {
@@ -248,9 +285,9 @@
return value_;
}
- uint32_t GetValueWithoutReadBarrierState() const {
+ uint32_t GetValueWithoutGCState() const {
CheckReadBarrierState();
- return value_ & ~(kReadBarrierStateMask << kReadBarrierStateShift);
+ return value_ & kGCStateMaskShiftedToggled;
}
// Only Object should be converting LockWords to/from uints.
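
Net effect of the relayout: one of the two former read barrier bits is reassigned as a mark bit, so the state and GC bits still total four and the hash/monitor payload keeps its 28 bits. A standalone sketch of that arithmetic, mirroring (not replacing) the enum above:

    #include <cstdint>

    constexpr uint32_t kStateSize = 2;
    constexpr uint32_t kReadBarrierStateSize = 1;  // was 2 before this change
    constexpr uint32_t kMarkBitStateSize = 1;
    constexpr uint32_t kGCStateSize = kReadBarrierStateSize + kMarkBitStateSize;
    constexpr uint32_t kHashSize = 32 - kStateSize - kGCStateSize;

    static_assert(kGCStateSize == 2, "GC state reuses the old two rb bits");
    static_assert(kHashSize == 28, "hash/monitor id payload is unchanged");
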
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 8f5419c..8ad47eb 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -636,8 +636,9 @@
static_assert(sizeof(Primitive::Type) == sizeof(int32_t),
"art::Primitive::Type and int32_t have different sizes.");
int32_t v32 = GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, primitive_type_));
- Primitive::Type type = static_cast<Primitive::Type>(v32 & 0xFFFF);
- DCHECK_EQ(static_cast<size_t>(v32 >> 16), Primitive::ComponentSizeShift(type));
+ Primitive::Type type = static_cast<Primitive::Type>(v32 & kPrimitiveTypeMask);
+ DCHECK_EQ(static_cast<size_t>(v32 >> kPrimitiveTypeSizeShiftShift),
+ Primitive::ComponentSizeShift(type));
return type;
}
@@ -646,8 +647,9 @@
static_assert(sizeof(Primitive::Type) == sizeof(int32_t),
"art::Primitive::Type and int32_t have different sizes.");
int32_t v32 = GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, primitive_type_));
- size_t size_shift = static_cast<Primitive::Type>(v32 >> 16);
- DCHECK_EQ(size_shift, Primitive::ComponentSizeShift(static_cast<Primitive::Type>(v32 & 0xFFFF)));
+ size_t size_shift = static_cast<Primitive::Type>(v32 >> kPrimitiveTypeSizeShiftShift);
+ DCHECK_EQ(size_shift,
+ Primitive::ComponentSizeShift(static_cast<Primitive::Type>(v32 & kPrimitiveTypeMask)));
return size_shift;
}
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 5c490de..8f6ce44 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -64,6 +64,12 @@
// 2 ref instance fields.]
static constexpr uint32_t kClassWalkSuper = 0xC0000000;
+ // Shift primitive type by kPrimitiveTypeSizeShiftShift to get the component type size shift
+ // Used for computing array size as follows:
+ // array_bytes = header_size + (elements << (primitive_type >> kPrimitiveTypeSizeShiftShift))
+ static constexpr uint32_t kPrimitiveTypeSizeShiftShift = 16;
+ static constexpr uint32_t kPrimitiveTypeMask = (1u << kPrimitiveTypeSizeShiftShift) - 1;
+
// Class Status
//
// kStatusRetired: Class that's temporarily used till class linking time
@@ -371,10 +377,10 @@
void SetPrimitiveType(Primitive::Type new_type) SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK_EQ(sizeof(Primitive::Type), sizeof(int32_t));
- int32_t v32 = static_cast<int32_t>(new_type);
- DCHECK_EQ(v32 & 0xFFFF, v32) << "upper 16 bits aren't zero";
+ uint32_t v32 = static_cast<uint32_t>(new_type);
+ DCHECK_EQ(v32 & kPrimitiveTypeMask, v32) << "upper 16 bits aren't zero";
// Store the component size shift in the upper 16 bits.
- v32 |= Primitive::ComponentSizeShift(new_type) << 16;
+ v32 |= Primitive::ComponentSizeShift(new_type) << kPrimitiveTypeSizeShiftShift;
SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, primitive_type_), v32);
}
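
A self-contained sketch of the packing these new constants name, using an arbitrary type id and size shift purely for illustration:

    #include <cstdint>

    constexpr uint32_t kPrimitiveTypeSizeShiftShift = 16;
    constexpr uint32_t kPrimitiveTypeMask = (1u << kPrimitiveTypeSizeShiftShift) - 1;

    constexpr uint32_t PackPrimitive(uint32_t type, uint32_t size_shift) {
      return type | (size_shift << kPrimitiveTypeSizeShiftShift);
    }

    // A type id of 5 with a 4-byte component (size shift 2), chosen arbitrarily:
    static_assert((PackPrimitive(5, 2) & kPrimitiveTypeMask) == 5, "type round-trips");
    static_assert((PackPrimitive(5, 2) >> kPrimitiveTypeSizeShiftShift) == 2,
                  "size shift round-trips");
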
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 0592c6c..0495c95 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -147,10 +147,20 @@
#endif
}
+inline uint32_t Object::GetMarkBit() {
+#ifdef USE_READ_BARRIER
+ return GetLockWord(false).MarkBitState();
+#else
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+#endif
+}
+
inline void Object::SetReadBarrierPointer(Object* rb_ptr) {
#ifdef USE_BAKER_READ_BARRIER
DCHECK(kUseBakerReadBarrier);
DCHECK_EQ(reinterpret_cast<uint64_t>(rb_ptr) >> 32, 0U);
+ DCHECK_NE(rb_ptr, ReadBarrier::BlackPtr()) << "Setting to black is not supported";
LockWord lw = GetLockWord(false);
lw.SetReadBarrierState(static_cast<uint32_t>(reinterpret_cast<uintptr_t>(rb_ptr)));
SetLockWord(lw, false);
@@ -173,6 +183,8 @@
DCHECK(kUseBakerReadBarrier);
DCHECK_EQ(reinterpret_cast<uint64_t>(expected_rb_ptr) >> 32, 0U);
DCHECK_EQ(reinterpret_cast<uint64_t>(rb_ptr) >> 32, 0U);
+ DCHECK_NE(expected_rb_ptr, ReadBarrier::BlackPtr()) << "Setting to black is not supported";
+ DCHECK_NE(rb_ptr, ReadBarrier::BlackPtr()) << "Setting to black is not supported";
LockWord expected_lw;
LockWord new_lw;
do {
@@ -216,6 +228,24 @@
#endif
}
+inline bool Object::AtomicSetMarkBit(uint32_t expected_mark_bit, uint32_t mark_bit) {
+ LockWord expected_lw;
+ LockWord new_lw;
+ do {
+ LockWord lw = GetLockWord(false);
+ if (UNLIKELY(lw.MarkBitState() != expected_mark_bit)) {
+ // Lost the race.
+ return false;
+ }
+ expected_lw = lw;
+ new_lw = lw;
+ new_lw.SetMarkBitState(mark_bit);
+ // Since this is only set from the mutator, we can use the non-release CAS.
+ } while (!CasLockWordWeakRelaxed(expected_lw, new_lw));
+ return true;
+}
+
+
inline void Object::AssertReadBarrierPointer() const {
if (kUseBakerReadBarrier) {
Object* obj = const_cast<Object*>(this);
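
AtomicSetMarkBit is the classic weak-CAS retry loop: observe, bail out if the bit no longer matches expectations, otherwise attempt the swap and retry on spurious failure. The same shape in portable C++, with std::atomic standing in for the lock word accessors:

    #include <atomic>
    #include <cstdint>

    bool AtomicSetBit(std::atomic<uint32_t>& word, uint32_t shift,
                      uint32_t expected_bit, uint32_t new_bit) {
      uint32_t old_value = word.load(std::memory_order_relaxed);
      do {
        if (((old_value >> shift) & 1u) != expected_bit) {
          return false;  // Lost the race to a concurrent updater.
        }
        // compare_exchange_weak refreshes old_value on failure, so the check
        // above always re-runs against the freshest value.
      } while (!word.compare_exchange_weak(
          old_value,
          (old_value & ~(1u << shift)) | (new_bit << shift),
          std::memory_order_relaxed));
      return true;
    }
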
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 701c600..13c536e 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -163,8 +163,7 @@
case LockWord::kUnlocked: {
// Try to compare and swap in a new hash, if we succeed we will return the hash on the next
// loop iteration.
- LockWord hash_word = LockWord::FromHashCode(GenerateIdentityHashCode(),
- lw.ReadBarrierState());
+ LockWord hash_word = LockWord::FromHashCode(GenerateIdentityHashCode(), lw.GCState());
DCHECK_EQ(hash_word.GetState(), LockWord::kHashCode);
if (const_cast<Object*>(this)->CasLockWordWeakRelaxed(lw, hash_word)) {
return hash_word.GetHashCode();
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index a4bdbad..5b129bf 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -93,6 +93,7 @@
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
void SetClass(Class* new_klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ // TODO: Clean this up and change to return int32_t
Object* GetReadBarrierPointer() SHARED_REQUIRES(Locks::mutator_lock_);
#ifndef USE_BAKER_OR_BROOKS_READ_BARRIER
@@ -103,6 +104,12 @@
template<bool kCasRelease = false>
ALWAYS_INLINE bool AtomicSetReadBarrierPointer(Object* expected_rb_ptr, Object* rb_ptr)
SHARED_REQUIRES(Locks::mutator_lock_);
+
+ ALWAYS_INLINE uint32_t GetMarkBit() SHARED_REQUIRES(Locks::mutator_lock_);
+
+ ALWAYS_INLINE bool AtomicSetMarkBit(uint32_t expected_mark_bit, uint32_t mark_bit)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
void AssertReadBarrierPointer() const SHARED_REQUIRES(Locks::mutator_lock_);
// The verifier treats all interfaces as java.lang.Object and relies on runtime checks in
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index bf9f931..e863ea9 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -155,7 +155,7 @@
return false;
}
}
- LockWord fat(this, lw.ReadBarrierState());
+ LockWord fat(this, lw.GCState());
// Publish the updated lock word, which may race with other threads.
bool success = GetObject()->CasLockWordWeakSequentiallyConsistent(lw, fat);
// Lock profiling.
@@ -774,20 +774,21 @@
return false;
}
// Deflate to a thin lock.
- LockWord new_lw = LockWord::FromThinLockId(owner->GetThreadId(), monitor->lock_count_,
- lw.ReadBarrierState());
+ LockWord new_lw = LockWord::FromThinLockId(owner->GetThreadId(),
+ monitor->lock_count_,
+ lw.GCState());
// Assume no concurrent read barrier state changes as mutators are suspended.
obj->SetLockWord(new_lw, false);
VLOG(monitor) << "Deflated " << obj << " to thin lock " << owner->GetTid() << " / "
<< monitor->lock_count_;
} else if (monitor->HasHashCode()) {
- LockWord new_lw = LockWord::FromHashCode(monitor->GetHashCode(), lw.ReadBarrierState());
+ LockWord new_lw = LockWord::FromHashCode(monitor->GetHashCode(), lw.GCState());
// Assume no concurrent read barrier state changes as mutators are suspended.
obj->SetLockWord(new_lw, false);
VLOG(monitor) << "Deflated " << obj << " to hash monitor " << monitor->GetHashCode();
} else {
// No lock and no hash, just put an empty lock word inside the object.
- LockWord new_lw = LockWord::FromDefault(lw.ReadBarrierState());
+ LockWord new_lw = LockWord::FromDefault(lw.GCState());
// Assume no concurrent read barrier state changes as mutators are suspended.
obj->SetLockWord(new_lw, false);
VLOG(monitor) << "Deflated" << obj << " to empty lock word";
@@ -876,7 +877,7 @@
LockWord lock_word = h_obj->GetLockWord(true);
switch (lock_word.GetState()) {
case LockWord::kUnlocked: {
- LockWord thin_locked(LockWord::FromThinLockId(thread_id, 0, lock_word.ReadBarrierState()));
+ LockWord thin_locked(LockWord::FromThinLockId(thread_id, 0, lock_word.GCState()));
if (h_obj->CasLockWordWeakSequentiallyConsistent(lock_word, thin_locked)) {
AtraceMonitorLock(self, h_obj.Get(), false /* is_wait */);
// CasLockWord enforces more than the acquire ordering we need here.
@@ -890,8 +891,9 @@
// We own the lock, increase the recursion count.
uint32_t new_count = lock_word.ThinLockCount() + 1;
if (LIKELY(new_count <= LockWord::kThinLockMaxCount)) {
- LockWord thin_locked(LockWord::FromThinLockId(thread_id, new_count,
- lock_word.ReadBarrierState()));
+ LockWord thin_locked(LockWord::FromThinLockId(thread_id,
+ new_count,
+ lock_word.GCState()));
if (!kUseReadBarrier) {
h_obj->SetLockWord(thin_locked, true);
AtraceMonitorLock(self, h_obj.Get(), false /* is_wait */);
@@ -975,9 +977,9 @@
LockWord new_lw = LockWord::Default();
if (lock_word.ThinLockCount() != 0) {
uint32_t new_count = lock_word.ThinLockCount() - 1;
- new_lw = LockWord::FromThinLockId(thread_id, new_count, lock_word.ReadBarrierState());
+ new_lw = LockWord::FromThinLockId(thread_id, new_count, lock_word.GCState());
} else {
- new_lw = LockWord::FromDefault(lock_word.ReadBarrierState());
+ new_lw = LockWord::FromDefault(lock_word.GCState());
}
if (!kUseReadBarrier) {
DCHECK_EQ(new_lw.ReadBarrierState(), 0U);
diff --git a/runtime/native_stack_dump.cc b/runtime/native_stack_dump.cc
new file mode 100644
index 0000000..c20c8b8
--- /dev/null
+++ b/runtime/native_stack_dump.cc
@@ -0,0 +1,422 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "native_stack_dump.h"
+
+#include <ostream>
+
+#include <stdio.h>
+
+#include "art_method.h"
+
+// For DumpNativeStack.
+#include <backtrace/Backtrace.h>
+#include <backtrace/BacktraceMap.h>
+
+#if defined(__linux__)
+
+#include <memory>
+#include <vector>
+
+#include <linux/unistd.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <sys/time.h>
+#include <sys/types.h>
+
+#include "arch/instruction_set.h"
+#include "base/memory_tool.h"
+#include "base/mutex.h"
+#include "base/stringprintf.h"
+#include "base/unix_file/fd_file.h"
+#include "oat_quick_method_header.h"
+#include "os.h"
+#include "thread-inl.h"
+#include "utils.h"
+
+#endif
+
+namespace art {
+
+#if defined(__linux__)
+
+static constexpr bool kUseAddr2line = !kIsTargetBuild;
+
+ALWAYS_INLINE
+static inline void WritePrefix(std::ostream& os, const char* prefix, bool odd) {
+ if (prefix != nullptr) {
+ os << prefix;
+ }
+ os << " ";
+ if (!odd) {
+ os << " ";
+ }
+}
+
+// The state of an open pipe to addr2line. In "server" mode, addr2line takes input on stdin
+// and prints the result to stdout. This struct keeps the state of the open connection.
+struct Addr2linePipe {
+ Addr2linePipe(int in_fd, int out_fd, const std::string& file_name, pid_t pid)
+ : in(in_fd, false), out(out_fd, false), file(file_name), child_pid(pid), odd(true) {}
+
+ ~Addr2linePipe() {
+ kill(child_pid, SIGKILL);
+ }
+
+ File in; // The file descriptor that is connected to the output of addr2line.
+ File out; // The file descriptor that is connected to the input of addr2line.
+
+ const std::string file; // The file addr2line is working on, so that we know when to close
+ // and restart.
+ const pid_t child_pid; // The pid of the child, which we should kill when we're done.
+ bool odd; // Print state for indentation of lines.
+};
+
+static std::unique_ptr<Addr2linePipe> Connect(const std::string& name, const char* args[]) {
+ int caller_to_addr2line[2];
+ int addr2line_to_caller[2];
+
+ if (pipe(caller_to_addr2line) == -1) {
+ return nullptr;
+ }
+ if (pipe(addr2line_to_caller) == -1) {
+ close(caller_to_addr2line[0]);
+ close(caller_to_addr2line[1]);
+ return nullptr;
+ }
+
+ pid_t pid = fork();
+ if (pid == -1) {
+ close(caller_to_addr2line[0]);
+ close(caller_to_addr2line[1]);
+ close(addr2line_to_caller[0]);
+ close(addr2line_to_caller[1]);
+ return nullptr;
+ }
+
+ if (pid == 0) {
+ dup2(caller_to_addr2line[0], STDIN_FILENO);
+ dup2(addr2line_to_caller[1], STDOUT_FILENO);
+
+ close(caller_to_addr2line[0]);
+ close(caller_to_addr2line[1]);
+ close(addr2line_to_caller[0]);
+ close(addr2line_to_caller[1]);
+
+ execv(args[0], const_cast<char* const*>(args));
+ exit(1);
+ } else {
+ close(caller_to_addr2line[0]);
+ close(addr2line_to_caller[1]);
+ return std::unique_ptr<Addr2linePipe>(new Addr2linePipe(addr2line_to_caller[0],
+ caller_to_addr2line[1],
+ name,
+ pid));
+ }
+}
+
+static void Drain(size_t expected,
+ const char* prefix,
+ std::unique_ptr<Addr2linePipe>* pipe /* inout */,
+ std::ostream& os) {
+ DCHECK(pipe != nullptr);
+ DCHECK(pipe->get() != nullptr);
+ int in = pipe->get()->in.Fd();
+ DCHECK_GE(in, 0);
+
+ bool prefix_written = false;
+
+ for (;;) {
+ constexpr uint32_t kWaitTimeExpectedMicros = 500 * 1000;
+ constexpr uint32_t kWaitTimeUnexpectedMicros = 50 * 1000;
+
+ struct timeval tv;
+ tv.tv_sec = 0;
+ tv.tv_usec = expected > 0 ? kWaitTimeExpectedMicros : kWaitTimeUnexpectedMicros;
+
+ fd_set rfds;
+ FD_ZERO(&rfds);
+ FD_SET(in, &rfds);
+
+ int retval = TEMP_FAILURE_RETRY(select(in + 1, &rfds, nullptr, nullptr, &tv));
+
+ if (retval < 0) {
+ // Other side may have crashed or other errors.
+ pipe->reset();
+ return;
+ }
+
+ if (retval == 0) {
+ // Timeout.
+ return;
+ }
+
+ DCHECK_EQ(retval, 1);
+
+ constexpr size_t kMaxBuffer = 128; // Relatively small buffer. Should be OK as we're on an
+ // alt stack, but just to be sure...
+ char buffer[kMaxBuffer];
+ memset(buffer, 0, kMaxBuffer);
+ int bytes_read = TEMP_FAILURE_RETRY(read(in, buffer, kMaxBuffer - 1));
+
+ if (bytes_read < 0) {
+ // This should not really happen...
+ pipe->reset();
+ return;
+ }
+
+ char* tmp = buffer;
+ while (*tmp != 0) {
+ if (!prefix_written) {
+ WritePrefix(os, prefix, (*pipe)->odd);
+ prefix_written = true;
+ }
+ char* new_line = strchr(tmp, '\n');
+ if (new_line == nullptr) {
+ os << tmp;
+
+ break;
+ } else {
+ char saved = *(new_line + 1);
+ *(new_line + 1) = 0;
+ os << tmp;
+ *(new_line + 1) = saved;
+
+ tmp = new_line + 1;
+ prefix_written = false;
+ (*pipe)->odd = !(*pipe)->odd;
+
+ if (expected > 0) {
+ expected--;
+ }
+ }
+ }
+ }
+}
+
+static void Addr2line(const std::string& map_src,
+ uintptr_t offset,
+ std::ostream& os,
+ const char* prefix,
+ std::unique_ptr<Addr2linePipe>* pipe /* inout */) {
+ DCHECK(pipe != nullptr);
+
+ if (map_src == "[vdso]") {
+ // Special-case this, our setup has problems with this.
+ return;
+ }
+
+ if (*pipe == nullptr || (*pipe)->file != map_src) {
+ if (*pipe != nullptr) {
+ Drain(0, prefix, pipe, os);
+ }
+ pipe->reset(); // Close early.
+
+ const char* args[7] = {
+ "/usr/bin/addr2line",
+ "--functions",
+ "--inlines",
+ "--demangle",
+ "-e",
+ map_src.c_str(),
+ nullptr
+ };
+ *pipe = Connect(map_src, args);
+ }
+
+ Addr2linePipe* pipe_ptr = pipe->get();
+ if (pipe_ptr == nullptr) {
+ // Failed...
+ return;
+ }
+
+ // Send the offset.
+ const std::string hex_offset = StringPrintf("%zx\n", offset);
+
+ if (!pipe_ptr->out.WriteFully(hex_offset.data(), hex_offset.length())) {
+ // Error. :-(
+ pipe->reset();
+ return;
+ }
+
+ // Now drain (expecting two lines).
+ Drain(2U, prefix, pipe, os);
+}
+
+static bool RunCommand(std::string cmd) {
+ FILE* stream = popen(cmd.c_str(), "r");
+ if (stream) {
+ pclose(stream);
+ return true;
+ } else {
+ return false;
+ }
+}
+
+static bool PcIsWithinQuickCode(ArtMethod* method, uintptr_t pc) NO_THREAD_SAFETY_ANALYSIS {
+ uintptr_t code = reinterpret_cast<uintptr_t>(EntryPointToCodePointer(
+ method->GetEntryPointFromQuickCompiledCode()));
+ if (code == 0) {
+ return pc == 0;
+ }
+ uintptr_t code_size = reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].code_size_;
+ return code <= pc && pc <= (code + code_size);
+}
+
+void DumpNativeStack(std::ostream& os,
+ pid_t tid,
+ BacktraceMap* existing_map,
+ const char* prefix,
+ ArtMethod* current_method,
+ void* ucontext_ptr) {
+ // b/18119146
+ if (RUNNING_ON_MEMORY_TOOL != 0) {
+ return;
+ }
+
+ BacktraceMap* map = existing_map;
+ std::unique_ptr<BacktraceMap> tmp_map;
+ if (map == nullptr) {
+ tmp_map.reset(BacktraceMap::Create(getpid()));
+ map = tmp_map.get();
+ }
+ std::unique_ptr<Backtrace> backtrace(Backtrace::Create(BACKTRACE_CURRENT_PROCESS, tid, map));
+ if (!backtrace->Unwind(0, reinterpret_cast<ucontext*>(ucontext_ptr))) {
+ os << prefix << "(backtrace::Unwind failed for thread " << tid
+ << ": " << backtrace->GetErrorString(backtrace->GetError()) << ")\n";
+ return;
+ } else if (backtrace->NumFrames() == 0) {
+ os << prefix << "(no native stack frames for thread " << tid << ")\n";
+ return;
+ }
+
+ // Check whether we have and should use addr2line.
+ bool use_addr2line;
+ if (kUseAddr2line) {
+ // Try to run it to see whether we have it. Push an argument so that it doesn't assume a.out
+ // and print to stderr.
+ use_addr2line = (gAborting > 0) && RunCommand("addr2line -h");
+ } else {
+ use_addr2line = false;
+ }
+
+ std::unique_ptr<Addr2linePipe> addr2line_state;
+
+ for (Backtrace::const_iterator it = backtrace->begin();
+ it != backtrace->end(); ++it) {
+ // We produce output like this:
+ // ] #00 pc 000075bb8 /system/lib/libc.so (unwind_backtrace_thread+536)
+ // In order for parsing tools to continue to function, the stack dump
+ // format must at least adhere to this format:
+ // #XX pc <RELATIVE_ADDR> <FULL_PATH_TO_SHARED_LIBRARY> ...
+ // The parsers require a single space before and after pc, and two spaces
+ // after the <RELATIVE_ADDR>. There can be any prefix data before the
+ // #XX. <RELATIVE_ADDR> has to be a hex number but with no 0x prefix.
+ os << prefix << StringPrintf("#%02zu pc ", it->num);
+ bool try_addr2line = false;
+ if (!BacktraceMap::IsValid(it->map)) {
+ os << StringPrintf(Is64BitInstructionSet(kRuntimeISA) ? "%016" PRIxPTR " ???"
+ : "%08" PRIxPTR " ???",
+ it->pc);
+ } else {
+ os << StringPrintf(Is64BitInstructionSet(kRuntimeISA) ? "%016" PRIxPTR " "
+ : "%08" PRIxPTR " ",
+ BacktraceMap::GetRelativePc(it->map, it->pc));
+ os << it->map.name;
+ os << " (";
+ if (!it->func_name.empty()) {
+ os << it->func_name;
+ if (it->func_offset != 0) {
+ os << "+" << it->func_offset;
+ }
+ try_addr2line = true;
+ } else if (current_method != nullptr &&
+ Locks::mutator_lock_->IsSharedHeld(Thread::Current()) &&
+ PcIsWithinQuickCode(current_method, it->pc)) {
+ const void* start_of_code = current_method->GetEntryPointFromQuickCompiledCode();
+ os << JniLongName(current_method) << "+"
+ << (it->pc - reinterpret_cast<uintptr_t>(start_of_code));
+ } else {
+ os << "???";
+ }
+ os << ")";
+ }
+ os << "\n";
+ if (try_addr2line && use_addr2line) {
+ Addr2line(it->map.name, it->pc - it->map.start, os, prefix, &addr2line_state);
+ }
+ }
+
+ if (addr2line_state != nullptr) {
+ Drain(0, prefix, &addr2line_state, os);
+ }
+}
+
+void DumpKernelStack(std::ostream& os, pid_t tid, const char* prefix, bool include_count) {
+ if (tid == GetTid()) {
+ // There's no point showing that we're reading our stack out of /proc!
+ return;
+ }
+
+ std::string kernel_stack_filename(StringPrintf("/proc/self/task/%d/stack", tid));
+ std::string kernel_stack;
+ if (!ReadFileToString(kernel_stack_filename, &kernel_stack)) {
+ os << prefix << "(couldn't read " << kernel_stack_filename << ")\n";
+ return;
+ }
+
+ std::vector<std::string> kernel_stack_frames;
+ Split(kernel_stack, '\n', &kernel_stack_frames);
+ // We skip the last stack frame because it's always equivalent to "[<ffffffff>] 0xffffffff",
+ // which looking at the source appears to be the kernel's way of saying "that's all, folks!".
+ kernel_stack_frames.pop_back();
+ for (size_t i = 0; i < kernel_stack_frames.size(); ++i) {
+ // Turn "[<ffffffff8109156d>] futex_wait_queue_me+0xcd/0x110"
+ // into "futex_wait_queue_me+0xcd/0x110".
+ const char* text = kernel_stack_frames[i].c_str();
+ const char* close_bracket = strchr(text, ']');
+ if (close_bracket != nullptr) {
+ text = close_bracket + 2;
+ }
+ os << prefix;
+ if (include_count) {
+ os << StringPrintf("#%02zd ", i);
+ }
+ os << text << "\n";
+ }
+}
+
+#elif defined(__APPLE__)
+
+void DumpNativeStack(std::ostream& os ATTRIBUTE_UNUSED,
+ pid_t tid ATTRIBUTE_UNUSED,
+ BacktraceMap* existing_map ATTRIBUTE_UNUSED,
+ const char* prefix ATTRIBUTE_UNUSED,
+ ArtMethod* current_method ATTRIBUTE_UNUSED,
+ void* ucontext_ptr ATTRIBUTE_UNUSED) {
+}
+
+void DumpKernelStack(std::ostream& os ATTRIBUTE_UNUSED,
+ pid_t tid ATTRIBUTE_UNUSED,
+ const char* prefix ATTRIBUTE_UNUSED,
+ bool include_count ATTRIBUTE_UNUSED) {
+}
+
+#else
+#error "Unsupported architecture for native stack dumps."
+#endif
+
+} // namespace art
diff --git a/runtime/native_stack_dump.h b/runtime/native_stack_dump.h
new file mode 100644
index 0000000..d64bc82
--- /dev/null
+++ b/runtime/native_stack_dump.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_STACK_DUMP_H_
+#define ART_RUNTIME_NATIVE_STACK_DUMP_H_
+
+#include <unistd.h>
+
+#include <iosfwd>
+
+#include "base/macros.h"
+
+class BacktraceMap;
+
+namespace art {
+
+class ArtMethod;
+
+// Dumps the native stack for thread 'tid' to 'os'.
+void DumpNativeStack(std::ostream& os,
+ pid_t tid,
+ BacktraceMap* map = nullptr,
+ const char* prefix = "",
+ ArtMethod* current_method = nullptr,
+ void* ucontext = nullptr)
+ NO_THREAD_SAFETY_ANALYSIS;
+
+// Dumps the kernel stack for thread 'tid' to 'os'. Note that this is only available on linux-x86.
+void DumpKernelStack(std::ostream& os,
+ pid_t tid,
+ const char* prefix = "",
+ bool include_count = true);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_STACK_DUMP_H_
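
A hypothetical call site, relying only on the defaults declared above (the prefix string is made up; GetTid() is ART's helper from utils.h):

    #include <iostream>

    #include "native_stack_dump.h"
    #include "utils.h"

    void LogOwnNativeStack() {
      art::DumpNativeStack(std::cerr, art::GetTid(), nullptr, "  native: ");
    }
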
diff --git a/runtime/oat.h b/runtime/oat.h
index 2c5c3e6..7c84fe9 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,7 +32,7 @@
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- static constexpr uint8_t kOatVersion[] = { '0', '8', '5', '\0' };
+ static constexpr uint8_t kOatVersion[] = { '0', '8', '6', '\0' };
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 68610a7..5752fd9 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -1073,7 +1073,7 @@
const OatFile::OatDexFile* OatFile::GetOatDexFile(const char* dex_location,
const uint32_t* dex_location_checksum,
- bool warn_if_not_found) const {
+ std::string* error_msg) const {
// NOTE: We assume here that the canonical location for a given dex_location never
// changes. If it does (i.e. some symlink used by the filename changes) we may return
// an incorrect OatDexFile. As long as we have a checksum to check, we shall return
@@ -1115,32 +1115,29 @@
secondary_oat_dex_files_.PutBefore(secondary_lb, key_copy, oat_dex_file);
}
}
- if (oat_dex_file != nullptr &&
- (dex_location_checksum == nullptr ||
- oat_dex_file->GetDexFileLocationChecksum() == *dex_location_checksum)) {
- return oat_dex_file;
+
+ if (oat_dex_file == nullptr) {
+ if (error_msg != nullptr) {
+ std::string dex_canonical_location = DexFile::GetDexCanonicalLocation(dex_location);
+ *error_msg = "Failed to find OatDexFile for DexFile " + std::string(dex_location)
+ + " (canonical path " + dex_canonical_location + ") in OatFile " + GetLocation();
+ }
+ return nullptr;
}
- if (warn_if_not_found) {
- std::string dex_canonical_location = DexFile::GetDexCanonicalLocation(dex_location);
- std::string checksum("<unspecified>");
- if (dex_location_checksum != nullptr) {
- checksum = StringPrintf("0x%08x", *dex_location_checksum);
+ if (dex_location_checksum != nullptr &&
+ oat_dex_file->GetDexFileLocationChecksum() != *dex_location_checksum) {
+ if (error_msg != nullptr) {
+ std::string dex_canonical_location = DexFile::GetDexCanonicalLocation(dex_location);
+ std::string checksum = StringPrintf("0x%08x", oat_dex_file->GetDexFileLocationChecksum());
+ std::string required_checksum = StringPrintf("0x%08x", *dex_location_checksum);
+ *error_msg = "OatDexFile for DexFile " + std::string(dex_location)
+ + " (canonical path " + dex_canonical_location + ") in OatFile " + GetLocation()
+ + " has checksum " + checksum + " but " + required_checksum + " was required";
}
- LOG(WARNING) << "Failed to find OatDexFile for DexFile " << dex_location
- << " ( canonical path " << dex_canonical_location << ")"
- << " with checksum " << checksum << " in OatFile " << GetLocation();
- if (kIsDebugBuild) {
- for (const OatDexFile* odf : oat_dex_files_storage_) {
- LOG(WARNING) << "OatFile " << GetLocation()
- << " contains OatDexFile " << odf->GetDexFileLocation()
- << " (canonical path " << odf->GetCanonicalDexFileLocation() << ")"
- << " with checksum 0x" << std::hex << odf->GetDexFileLocationChecksum();
- }
- }
+ return nullptr;
}
-
- return nullptr;
+ return oat_dex_file;
}
OatFile::OatDexFile::OatDexFile(const OatFile* oat_file,
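
The new contract, a null return plus an optional out-parameter message, is exercised by the callers updated below; condensed:

    std::string error_msg;
    const OatFile::OatDexFile* oat_dex_file =
        oat_file.GetOatDexFile(dex_location, /* checksum */ nullptr, &error_msg);
    if (oat_dex_file == nullptr) {
      LOG(WARNING) << error_msg;  // Each caller now decides how loudly to report.
    }
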
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index aa727ff..f5ab9dc 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -213,9 +213,15 @@
friend class art::OatDexFile;
};
+
+ // Get the OatDexFile for the given dex_location within this oat file.
+ // If dex_location_checksum is non-null, the OatDexFile will only be
+ // returned if it has a matching checksum.
+ // If error_msg is non-null and no OatDexFile is returned, error_msg will
+ // be updated with a description of why no OatDexFile was returned.
const OatDexFile* GetOatDexFile(const char* dex_location,
const uint32_t* const dex_location_checksum,
- bool exception_if_not_found = true) const
+ /*out*/std::string* error_msg = nullptr) const
REQUIRES(!secondary_lookup_lock_);
const std::vector<const OatDexFile*>& GetOatDexFiles() const {
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index fd58907..2c2a2b8 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -277,10 +277,9 @@
// Load the primary dex file.
std::string error_msg;
const OatFile::OatDexFile* oat_dex_file = oat_file.GetOatDexFile(
- dex_location, nullptr, false);
+ dex_location, nullptr, &error_msg);
if (oat_dex_file == nullptr) {
- LOG(WARNING) << "Attempt to load out-of-date oat file "
- << oat_file.GetLocation() << " for dex location " << dex_location;
+ LOG(WARNING) << error_msg;
return std::vector<std::unique_ptr<const DexFile>>();
}
@@ -294,7 +293,7 @@
// Load secondary multidex files
for (size_t i = 1; ; i++) {
std::string secondary_dex_location = DexFile::GetMultiDexLocation(i, dex_location);
- oat_dex_file = oat_file.GetOatDexFile(secondary_dex_location.c_str(), nullptr, false);
+ oat_dex_file = oat_file.GetOatDexFile(secondary_dex_location.c_str(), nullptr);
if (oat_dex_file == nullptr) {
// There are no more secondary dex files to load.
break;
@@ -389,25 +388,25 @@
// Verify the dex checksum.
// Note: GetOatDexFile will return null if the dex checksum doesn't match
// what we provide, which verifies the primary dex checksum for us.
+ std::string error_msg;
const uint32_t* dex_checksum_pointer = GetRequiredDexChecksum();
const OatFile::OatDexFile* oat_dex_file = file.GetOatDexFile(
- dex_location_.c_str(), dex_checksum_pointer, false);
+ dex_location_.c_str(), dex_checksum_pointer, &error_msg);
if (oat_dex_file == nullptr) {
+ VLOG(oat) << error_msg;
return kOatOutOfDate;
}
// Verify the dex checksums for any secondary multidex files
for (size_t i = 1; ; i++) {
- std::string secondary_dex_location
- = DexFile::GetMultiDexLocation(i, dex_location_.c_str());
+ std::string secondary_dex_location = DexFile::GetMultiDexLocation(i, dex_location_.c_str());
const OatFile::OatDexFile* secondary_oat_dex_file
- = file.GetOatDexFile(secondary_dex_location.c_str(), nullptr, false);
+ = file.GetOatDexFile(secondary_dex_location.c_str(), nullptr);
if (secondary_oat_dex_file == nullptr) {
// There are no more secondary dex files to check.
break;
}
- std::string error_msg;
uint32_t expected_secondary_checksum = 0;
if (DexFile::GetChecksum(secondary_dex_location.c_str(),
&expected_secondary_checksum, &error_msg)) {
@@ -429,7 +428,6 @@
}
CompilerFilter::Filter current_compiler_filter = file.GetCompilerFilter();
- VLOG(oat) << "Compiler filter for " << file.GetLocation() << " is " << current_compiler_filter;
// Verify the image checksum
if (CompilerFilter::DependsOnImageChecksum(current_compiler_filter)) {
@@ -760,8 +758,8 @@
// Get the checksum from the odex if we can.
const OatFile* odex_file = odex_.GetFile();
if (odex_file != nullptr) {
- const OatFile::OatDexFile* odex_dex_file = odex_file->GetOatDexFile(
- dex_location_.c_str(), nullptr, false);
+ const OatFile::OatDexFile* odex_dex_file
+ = odex_file->GetOatDexFile(dex_location_.c_str(), nullptr);
if (odex_dex_file != nullptr) {
cached_required_dex_checksum_ = odex_dex_file->GetDexFileLocationChecksum();
required_dex_checksum_found_ = true;
@@ -867,6 +865,8 @@
status_ = kOatOutOfDate;
} else {
status_ = oat_file_assistant_->GivenOatFileStatus(*file);
+ VLOG(oat) << file->GetLocation() << " is " << status_
+ << " with filter " << file->GetCompilerFilter();
}
}
return status_;
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index 39848b4..05c5a22 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -320,6 +320,34 @@
EXPECT_TRUE(oat_file_assistant.HasOriginalDexFiles());
}
+// Case: We have a DEX file and ODEX file for a different dex location.
+// Expect: The status is kDex2OatNeeded.
+TEST_F(OatFileAssistantTest, OatForDifferentDex) {
+ // Generate an odex file for OatForDifferentDex_A.jar
+ std::string dex_location_a = GetScratchDir() + "/OatForDifferentDex_A.jar";
+ std::string odex_location = GetOdexDir() + "/OatForDifferentDex.odex";
+ Copy(GetDexSrc1(), dex_location_a);
+ GenerateOdexForTest(dex_location_a, odex_location, CompilerFilter::kSpeed);
+
+ // Try to use that odex file for OatForDifferentDex.jar
+ std::string dex_location = GetScratchDir() + "/OatForDifferentDex.jar";
+ Copy(GetDexSrc1(), dex_location);
+
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
+
+ EXPECT_EQ(OatFileAssistant::kDex2OatNeeded,
+ oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
+
+ EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
+ EXPECT_TRUE(oat_file_assistant.OdexFileExists());
+ EXPECT_TRUE(oat_file_assistant.OdexFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OdexFileIsUpToDate());
+ EXPECT_FALSE(oat_file_assistant.OatFileExists());
+ EXPECT_TRUE(oat_file_assistant.OatFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OatFileNeedsRelocation());
+ EXPECT_FALSE(oat_file_assistant.OatFileIsUpToDate());
+}
+
// Case: We have a DEX file and speed-profile OAT file for it.
// Expect: The status is kNoDexOptNeeded if the profile hasn't changed, but
// kDex2Oat if the profile has changed.
diff --git a/runtime/os_linux.cc b/runtime/os_linux.cc
index 1d1413b..1db09b4 100644
--- a/runtime/os_linux.cc
+++ b/runtime/os_linux.cc
@@ -53,7 +53,7 @@
File* OS::OpenFileWithFlags(const char* name, int flags) {
CHECK(name != nullptr);
- bool read_only = (flags == O_RDONLY);
+ bool read_only = ((flags & O_ACCMODE) == O_RDONLY);
std::unique_ptr<File> file(new File(name, flags, 0666, !read_only));
if (!file->IsOpened()) {
return nullptr;
diff --git a/runtime/read_barrier.h b/runtime/read_barrier.h
index 42e959c..5d32c09 100644
--- a/runtime/read_barrier.h
+++ b/runtime/read_barrier.h
@@ -99,8 +99,9 @@
// Note: These couldn't be constexpr pointers as reinterpret_cast isn't compatible with them.
static constexpr uintptr_t white_ptr_ = 0x0; // Not marked.
static constexpr uintptr_t gray_ptr_ = 0x1; // Marked, but not marked through. On mark stack.
+ // TODO: black_ptr_ is unused, we should remove it.
static constexpr uintptr_t black_ptr_ = 0x2; // Marked through. Used for non-moving objects.
- static constexpr uintptr_t rb_ptr_mask_ = 0x3; // The low 2 bits for white|gray|black.
+ static constexpr uintptr_t rb_ptr_mask_ = 0x1; // The low bits for white|gray.
};
} // namespace art
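
With the mask narrowed to one bit, only white and gray remain distinguishable in the read barrier state; broadly, the lock word mark bit added above takes over the role the black state used to play. A trivial sketch of the narrowed checks:

    #include <cstdint>

    constexpr uintptr_t kWhite = 0x0, kGray = 0x1, kRbPtrMask = 0x1;

    inline bool IsWhite(uintptr_t rb_state) { return (rb_state & kRbPtrMask) == kWhite; }
    inline bool IsGray(uintptr_t rb_state) { return (rb_state & kRbPtrMask) == kGray; }
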
diff --git a/runtime/runtime-inl.h b/runtime/runtime-inl.h
index bfa8c54..265587d 100644
--- a/runtime/runtime-inl.h
+++ b/runtime/runtime-inl.h
@@ -45,9 +45,11 @@
return GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
} else if (method == GetCalleeSaveMethodUnchecked(Runtime::kSaveAll)) {
return GetCalleeSaveMethodFrameInfo(Runtime::kSaveAll);
- } else {
- DCHECK_EQ(method, GetCalleeSaveMethodUnchecked(Runtime::kRefsOnly));
+ } else if (method == GetCalleeSaveMethodUnchecked(Runtime::kRefsOnly)) {
return GetCalleeSaveMethodFrameInfo(Runtime::kRefsOnly);
+ } else {
+ DCHECK_EQ(method, GetCalleeSaveMethodUnchecked(Runtime::kSaveEverything));
+ return GetCalleeSaveMethodFrameInfo(Runtime::kSaveEverything);
}
}
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index ca270a6..68fa0d3 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -114,6 +114,7 @@
#include "native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.h"
#include "native/sun_misc_Unsafe.h"
#include "native_bridge_art_interface.h"
+#include "native_stack_dump.h"
#include "oat_file.h"
#include "oat_file_manager.h"
#include "os.h"
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 9f64e48..c935c90 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -383,9 +383,10 @@
// Returns a special method that describes all callee saves being spilled to the stack.
enum CalleeSaveType {
- kSaveAll,
+ kSaveAll, // All callee-save registers.
kRefsOnly,
kRefsAndArgs,
+ kSaveEverything, // Even caller-save registers.
kLastCalleeSaveType // Value used for iteration
};
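
Because kLastCalleeSaveType remains the iteration sentinel, existing loops automatically cover the new kSaveEverything entry; an illustrative loop, not a quote from the tree:

    for (int i = 0; i < Runtime::kLastCalleeSaveType; ++i) {
      auto type = static_cast<Runtime::CalleeSaveType>(i);
      // ... set up or validate the callee-save method for `type` ...
    }
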
diff --git a/runtime/runtime_linux.cc b/runtime/runtime_linux.cc
index bc963c5..60ebabc 100644
--- a/runtime/runtime_linux.cc
+++ b/runtime/runtime_linux.cc
@@ -28,6 +28,7 @@
#include "base/macros.h"
#include "base/mutex.h"
#include "base/stringprintf.h"
+#include "native_stack_dump.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "utils.h"
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 76f3161..3326736 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -55,6 +55,7 @@
#include "mirror/object_array-inl.h"
#include "mirror/stack_trace_element.h"
#include "monitor.h"
+#include "native_stack_dump.h"
#include "nth_caller_visitor.h"
#include "oat_quick_method_header.h"
#include "object_lock.h"
@@ -1817,22 +1818,12 @@
ScopedLocalRef<jthrowable> exception(tlsPtr_.jni_env, tlsPtr_.jni_env->ExceptionOccurred());
tlsPtr_.jni_env->ExceptionClear();
- // If the thread has its own handler, use that.
- ScopedLocalRef<jobject> handler(tlsPtr_.jni_env,
- tlsPtr_.jni_env->GetObjectField(peer.get(),
- WellKnownClasses::java_lang_Thread_uncaughtHandler));
- if (handler.get() == nullptr) {
- // Otherwise use the thread group's default handler.
- handler.reset(tlsPtr_.jni_env->GetObjectField(peer.get(),
- WellKnownClasses::java_lang_Thread_group));
- }
+ // Call the Thread instance's dispatchUncaughtException(Throwable)
+ tlsPtr_.jni_env->CallVoidMethod(peer.get(),
+ WellKnownClasses::java_lang_Thread_dispatchUncaughtException,
+ exception.get());
- // Call the handler.
- tlsPtr_.jni_env->CallVoidMethod(handler.get(),
- WellKnownClasses::java_lang_Thread__UncaughtExceptionHandler_uncaughtException,
- peer.get(), exception.get());
-
- // If the handler threw, clear that exception too.
+ // If the dispatchUncaughtException threw, clear that exception too.
tlsPtr_.jni_env->ExceptionClear();
}
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 16ef0ff..419ecec 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -35,6 +35,7 @@
#include "jni_internal.h"
#include "lock_word.h"
#include "monitor.h"
+#include "native_stack_dump.h"
#include "scoped_thread_state_change.h"
#include "thread.h"
#include "trace.h"
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 3f779df..515ba9f 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -46,20 +46,12 @@
#include <sys/syscall.h>
#endif
-// For DumpNativeStack.
-#include <backtrace/Backtrace.h>
-#include <backtrace/BacktraceMap.h>
-
#if defined(__linux__)
#include <linux/unistd.h>
#endif
namespace art {
-#if defined(__linux__)
-static constexpr bool kUseAddr2line = !kIsTargetBuild;
-#endif
-
pid_t GetTid() {
#if defined(__APPLE__)
uint64_t owner;
@@ -1026,210 +1018,6 @@
return "";
}
-#if defined(__linux__)
-
-ALWAYS_INLINE
-static inline void WritePrefix(std::ostream* os, const char* prefix, bool odd) {
- if (prefix != nullptr) {
- *os << prefix;
- }
- *os << " ";
- if (!odd) {
- *os << " ";
- }
-}
-
-static bool RunCommand(std::string cmd, std::ostream* os, const char* prefix) {
- FILE* stream = popen(cmd.c_str(), "r");
- if (stream) {
- if (os != nullptr) {
- bool odd_line = true; // We indent them differently.
- bool wrote_prefix = false; // Have we already written a prefix?
- constexpr size_t kMaxBuffer = 128; // Relatively small buffer. Should be OK as we're on an
- // alt stack, but just to be sure...
- char buffer[kMaxBuffer];
- while (!feof(stream)) {
- if (fgets(buffer, kMaxBuffer, stream) != nullptr) {
- // Split on newlines.
- char* tmp = buffer;
- for (;;) {
- char* new_line = strchr(tmp, '\n');
- if (new_line == nullptr) {
- // Print the rest.
- if (*tmp != 0) {
- if (!wrote_prefix) {
- WritePrefix(os, prefix, odd_line);
- }
- wrote_prefix = true;
- *os << tmp;
- }
- break;
- }
- if (!wrote_prefix) {
- WritePrefix(os, prefix, odd_line);
- }
- char saved = *(new_line + 1);
- *(new_line + 1) = 0;
- *os << tmp;
- *(new_line + 1) = saved;
- tmp = new_line + 1;
- odd_line = !odd_line;
- wrote_prefix = false;
- }
- }
- }
- }
- pclose(stream);
- return true;
- } else {
- return false;
- }
-}
-
-static void Addr2line(const std::string& map_src, uintptr_t offset, std::ostream& os,
- const char* prefix) {
- std::string cmdline(StringPrintf("addr2line --functions --inlines --demangle -e %s %zx",
- map_src.c_str(), offset));
- RunCommand(cmdline.c_str(), &os, prefix);
-}
-
-static bool PcIsWithinQuickCode(ArtMethod* method, uintptr_t pc) NO_THREAD_SAFETY_ANALYSIS {
- uintptr_t code = reinterpret_cast<uintptr_t>(EntryPointToCodePointer(
- method->GetEntryPointFromQuickCompiledCode()));
- if (code == 0) {
- return pc == 0;
- }
- uintptr_t code_size = reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].code_size_;
- return code <= pc && pc <= (code + code_size);
-}
-#endif
-
-void DumpNativeStack(std::ostream& os, pid_t tid, BacktraceMap* existing_map, const char* prefix,
- ArtMethod* current_method, void* ucontext_ptr) {
-#if __linux__
- // b/18119146
- if (RUNNING_ON_MEMORY_TOOL != 0) {
- return;
- }
-
- BacktraceMap* map = existing_map;
- std::unique_ptr<BacktraceMap> tmp_map;
- if (map == nullptr) {
- tmp_map.reset(BacktraceMap::Create(getpid()));
- map = tmp_map.get();
- }
- std::unique_ptr<Backtrace> backtrace(Backtrace::Create(BACKTRACE_CURRENT_PROCESS, tid, map));
- if (!backtrace->Unwind(0, reinterpret_cast<ucontext*>(ucontext_ptr))) {
- os << prefix << "(backtrace::Unwind failed for thread " << tid
- << ": " << backtrace->GetErrorString(backtrace->GetError()) << ")\n";
- return;
- } else if (backtrace->NumFrames() == 0) {
- os << prefix << "(no native stack frames for thread " << tid << ")\n";
- return;
- }
-
- // Check whether we have and should use addr2line.
- bool use_addr2line;
- if (kUseAddr2line) {
- // Try to run it to see whether we have it. Push an argument so that it doesn't assume a.out
- // and print to stderr.
- use_addr2line = (gAborting > 0) && RunCommand("addr2line -h", nullptr, nullptr);
- } else {
- use_addr2line = false;
- }
-
- for (Backtrace::const_iterator it = backtrace->begin();
- it != backtrace->end(); ++it) {
- // We produce output like this:
- // ] #00 pc 000075bb8 /system/lib/libc.so (unwind_backtrace_thread+536)
- // In order for parsing tools to continue to function, the stack dump
- // format must at least adhere to this format:
- // #XX pc <RELATIVE_ADDR> <FULL_PATH_TO_SHARED_LIBRARY> ...
- // The parsers require a single space before and after pc, and two spaces
- // after the <RELATIVE_ADDR>. There can be any prefix data before the
- // #XX. <RELATIVE_ADDR> has to be a hex number but with no 0x prefix.
- os << prefix << StringPrintf("#%02zu pc ", it->num);
- bool try_addr2line = false;
- if (!BacktraceMap::IsValid(it->map)) {
- os << StringPrintf(Is64BitInstructionSet(kRuntimeISA) ? "%016" PRIxPTR " ???"
- : "%08" PRIxPTR " ???",
- it->pc);
- } else {
- os << StringPrintf(Is64BitInstructionSet(kRuntimeISA) ? "%016" PRIxPTR " "
- : "%08" PRIxPTR " ",
- BacktraceMap::GetRelativePc(it->map, it->pc));
- os << it->map.name;
- os << " (";
- if (!it->func_name.empty()) {
- os << it->func_name;
- if (it->func_offset != 0) {
- os << "+" << it->func_offset;
- }
- try_addr2line = true;
- } else if (current_method != nullptr &&
- Locks::mutator_lock_->IsSharedHeld(Thread::Current()) &&
- PcIsWithinQuickCode(current_method, it->pc)) {
- const void* start_of_code = current_method->GetEntryPointFromQuickCompiledCode();
- os << JniLongName(current_method) << "+"
- << (it->pc - reinterpret_cast<uintptr_t>(start_of_code));
- } else {
- os << "???";
- }
- os << ")";
- }
- os << "\n";
- if (try_addr2line && use_addr2line) {
- Addr2line(it->map.name, it->pc - it->map.start, os, prefix);
- }
- }
-#else
- UNUSED(os, tid, existing_map, prefix, current_method, ucontext_ptr);
-#endif
-}
-
-#if defined(__APPLE__)
-
-// TODO: is there any way to get the kernel stack on Mac OS?
-void DumpKernelStack(std::ostream&, pid_t, const char*, bool) {}
-
-#else
-
-void DumpKernelStack(std::ostream& os, pid_t tid, const char* prefix, bool include_count) {
- if (tid == GetTid()) {
- // There's no point showing that we're reading our stack out of /proc!
- return;
- }
-
- std::string kernel_stack_filename(StringPrintf("/proc/self/task/%d/stack", tid));
- std::string kernel_stack;
- if (!ReadFileToString(kernel_stack_filename, &kernel_stack)) {
- os << prefix << "(couldn't read " << kernel_stack_filename << ")\n";
- return;
- }
-
- std::vector<std::string> kernel_stack_frames;
- Split(kernel_stack, '\n', &kernel_stack_frames);
- // We skip the last stack frame because it's always equivalent to "[<ffffffff>] 0xffffffff",
- // which looking at the source appears to be the kernel's way of saying "that's all, folks!".
- kernel_stack_frames.pop_back();
- for (size_t i = 0; i < kernel_stack_frames.size(); ++i) {
- // Turn "[<ffffffff8109156d>] futex_wait_queue_me+0xcd/0x110"
- // into "futex_wait_queue_me+0xcd/0x110".
- const char* text = kernel_stack_frames[i].c_str();
- const char* close_bracket = strchr(text, ']');
- if (close_bracket != nullptr) {
- text = close_bracket + 2;
- }
- os << prefix;
- if (include_count) {
- os << StringPrintf("#%02zd ", i);
- }
- os << text << "\n";
- }
-}
-
-#endif
-
const char* GetAndroidRoot() {
const char* android_root = getenv("ANDROID_ROOT");
if (android_root == nullptr) {
diff --git a/runtime/utils.h b/runtime/utils.h
index b2746ee..699b732 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -242,21 +242,6 @@
// implementation-defined limit.
void SetThreadName(const char* thread_name);
-// Dumps the native stack for thread 'tid' to 'os'.
-void DumpNativeStack(std::ostream& os,
- pid_t tid,
- BacktraceMap* map = nullptr,
- const char* prefix = "",
- ArtMethod* current_method = nullptr,
- void* ucontext = nullptr)
- NO_THREAD_SAFETY_ANALYSIS;
-
-// Dumps the kernel stack for thread 'tid' to 'os'. Note that this is only available on linux-x86.
-void DumpKernelStack(std::ostream& os,
- pid_t tid,
- const char* prefix = "",
- bool include_count = true);
-
// Find $ANDROID_ROOT, /system, or abort.
const char* GetAndroidRoot();
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index 355d552..48deb35 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -57,7 +57,6 @@
jclass WellKnownClasses::java_lang_StringFactory;
jclass WellKnownClasses::java_lang_System;
jclass WellKnownClasses::java_lang_Thread;
-jclass WellKnownClasses::java_lang_Thread__UncaughtExceptionHandler;
jclass WellKnownClasses::java_lang_ThreadGroup;
jclass WellKnownClasses::java_lang_Throwable;
jclass WellKnownClasses::java_nio_DirectByteBuffer;
@@ -121,9 +120,9 @@
jmethodID WellKnownClasses::java_lang_StringFactory_newStringFromCodePoints;
jmethodID WellKnownClasses::java_lang_StringFactory_newStringFromStringBuilder;
jmethodID WellKnownClasses::java_lang_System_runFinalization = nullptr;
+jmethodID WellKnownClasses::java_lang_Thread_dispatchUncaughtException;
jmethodID WellKnownClasses::java_lang_Thread_init;
jmethodID WellKnownClasses::java_lang_Thread_run;
-jmethodID WellKnownClasses::java_lang_Thread__UncaughtExceptionHandler_uncaughtException;
jmethodID WellKnownClasses::java_lang_ThreadGroup_removeThread;
jmethodID WellKnownClasses::java_nio_DirectByteBuffer_init;
jmethodID WellKnownClasses::libcore_reflect_AnnotationFactory_createAnnotation;
@@ -141,7 +140,6 @@
jfieldID WellKnownClasses::java_lang_Thread_lock;
jfieldID WellKnownClasses::java_lang_Thread_name;
jfieldID WellKnownClasses::java_lang_Thread_priority;
-jfieldID WellKnownClasses::java_lang_Thread_uncaughtHandler;
jfieldID WellKnownClasses::java_lang_Thread_nativePeer;
jfieldID WellKnownClasses::java_lang_ThreadGroup_groups;
jfieldID WellKnownClasses::java_lang_ThreadGroup_ngroups;
@@ -245,8 +243,6 @@
java_lang_StringFactory = CacheClass(env, "java/lang/StringFactory");
java_lang_System = CacheClass(env, "java/lang/System");
java_lang_Thread = CacheClass(env, "java/lang/Thread");
- java_lang_Thread__UncaughtExceptionHandler = CacheClass(env,
- "java/lang/Thread$UncaughtExceptionHandler");
java_lang_ThreadGroup = CacheClass(env, "java/lang/ThreadGroup");
java_lang_Throwable = CacheClass(env, "java/lang/Throwable");
java_nio_DirectByteBuffer = CacheClass(env, "java/nio/DirectByteBuffer");
@@ -273,9 +269,9 @@
java_lang_ref_ReferenceQueue_add = CacheMethod(env, java_lang_ref_ReferenceQueue.get(), true, "add", "(Ljava/lang/ref/Reference;)V");
java_lang_reflect_Proxy_invoke = CacheMethod(env, java_lang_reflect_Proxy, true, "invoke", "(Ljava/lang/reflect/Proxy;Ljava/lang/reflect/Method;[Ljava/lang/Object;)Ljava/lang/Object;");
+ java_lang_Thread_dispatchUncaughtException = CacheMethod(env, java_lang_Thread, false, "dispatchUncaughtException", "(Ljava/lang/Throwable;)V");
java_lang_Thread_init = CacheMethod(env, java_lang_Thread, false, "<init>", "(Ljava/lang/ThreadGroup;Ljava/lang/String;IZ)V");
java_lang_Thread_run = CacheMethod(env, java_lang_Thread, false, "run", "()V");
- java_lang_Thread__UncaughtExceptionHandler_uncaughtException = CacheMethod(env, java_lang_Thread__UncaughtExceptionHandler, false, "uncaughtException", "(Ljava/lang/Thread;Ljava/lang/Throwable;)V");
java_lang_ThreadGroup_removeThread = CacheMethod(env, java_lang_ThreadGroup, false, "threadTerminated", "(Ljava/lang/Thread;)V");
java_nio_DirectByteBuffer_init = CacheMethod(env, java_nio_DirectByteBuffer, false, "<init>", "(JI)V");
libcore_reflect_AnnotationFactory_createAnnotation = CacheMethod(env, libcore_reflect_AnnotationFactory, true, "createAnnotation", "(Ljava/lang/Class;[Llibcore/reflect/AnnotationMember;)Ljava/lang/annotation/Annotation;");
@@ -349,7 +345,6 @@
java_lang_Thread_lock = CacheField(env, java_lang_Thread, false, "lock", "Ljava/lang/Object;");
java_lang_Thread_name = CacheField(env, java_lang_Thread, false, "name", "Ljava/lang/String;");
java_lang_Thread_priority = CacheField(env, java_lang_Thread, false, "priority", "I");
- java_lang_Thread_uncaughtHandler = CacheField(env, java_lang_Thread, false, "uncaughtExceptionHandler", "Ljava/lang/Thread$UncaughtExceptionHandler;");
java_lang_Thread_nativePeer = CacheField(env, java_lang_Thread, false, "nativePeer", "J");
java_lang_ThreadGroup_groups = CacheField(env, java_lang_ThreadGroup, false, "groups", "[Ljava/lang/ThreadGroup;");
java_lang_ThreadGroup_ngroups = CacheField(env, java_lang_ThreadGroup, false, "ngroups", "I");
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index cc60b4d..c9faf69 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -69,7 +69,6 @@
static jclass java_lang_System;
static jclass java_lang_Thread;
static jclass java_lang_ThreadGroup;
- static jclass java_lang_Thread__UncaughtExceptionHandler;
static jclass java_lang_Throwable;
static jclass java_util_ArrayList;
static jclass java_util_Collections;
@@ -132,9 +131,9 @@
static jmethodID java_lang_StringFactory_newStringFromCodePoints;
static jmethodID java_lang_StringFactory_newStringFromStringBuilder;
static jmethodID java_lang_System_runFinalization;
+ static jmethodID java_lang_Thread_dispatchUncaughtException;
static jmethodID java_lang_Thread_init;
static jmethodID java_lang_Thread_run;
- static jmethodID java_lang_Thread__UncaughtExceptionHandler_uncaughtException;
static jmethodID java_lang_ThreadGroup_removeThread;
static jmethodID java_nio_DirectByteBuffer_init;
static jmethodID libcore_reflect_AnnotationFactory_createAnnotation;
@@ -154,7 +153,6 @@
static jfieldID java_lang_Thread_lock;
static jfieldID java_lang_Thread_name;
static jfieldID java_lang_Thread_priority;
- static jfieldID java_lang_Thread_uncaughtHandler;
static jfieldID java_lang_Thread_nativePeer;
static jfieldID java_lang_ThreadGroup_groups;
static jfieldID java_lang_ThreadGroup_ngroups;
diff --git a/test/004-JniTest/src/Main.java b/test/004-JniTest/src/Main.java
index e0530d8..0221900 100644
--- a/test/004-JniTest/src/Main.java
+++ b/test/004-JniTest/src/Main.java
@@ -220,7 +220,7 @@
InvocationHandler handler = new DummyInvocationHandler();
SimpleInterface proxy =
(SimpleInterface) Proxy.newProxyInstance(SimpleInterface.class.getClassLoader(),
- new Class[] {SimpleInterface.class}, handler);
+ new Class<?>[] {SimpleInterface.class}, handler);
if (testGetMethodID(SimpleInterface.class) == 0) {
throw new AssertionError();
}
diff --git a/test/004-UnsafeTest/src/Main.java b/test/004-UnsafeTest/src/Main.java
index 9d4618a..d43d374 100644
--- a/test/004-UnsafeTest/src/Main.java
+++ b/test/004-UnsafeTest/src/Main.java
@@ -249,6 +249,6 @@
public volatile Object volatileObjectVar = null;
}
- private static native int vmArrayBaseOffset(Class clazz);
- private static native int vmArrayIndexScale(Class clazz);
+ private static native int vmArrayBaseOffset(Class<?> clazz);
+ private static native int vmArrayIndexScale(Class<?> clazz);
}
diff --git a/test/005-annotations/src/android/test/anno/AnnoFancyMethod.java b/test/005-annotations/src/android/test/anno/AnnoFancyMethod.java
index 3088866..aa7808f 100644
--- a/test/005-annotations/src/android/test/anno/AnnoFancyMethod.java
+++ b/test/005-annotations/src/android/test/anno/AnnoFancyMethod.java
@@ -10,5 +10,5 @@
boolean callMe() default false;
boolean biteMe();
AnnoFancyMethodEnum enumerated() default AnnoFancyMethodEnum.FOO;
- Class someClass() default SomeClass.class;
+ Class<?> someClass() default SomeClass.class;
}
diff --git a/test/005-annotations/src/android/test/anno/AnnoMissingClass.java b/test/005-annotations/src/android/test/anno/AnnoMissingClass.java
index c32e9a2..7933b80 100644
--- a/test/005-annotations/src/android/test/anno/AnnoMissingClass.java
+++ b/test/005-annotations/src/android/test/anno/AnnoMissingClass.java
@@ -20,5 +20,5 @@
@Retention(RetentionPolicy.RUNTIME)
public @interface AnnoMissingClass {
- Class value();
+ Class<?> value();
}
diff --git a/test/005-annotations/src/android/test/anno/TestAnnotations.java b/test/005-annotations/src/android/test/anno/TestAnnotations.java
index 51254b4..8ea8e8e 100644
--- a/test/005-annotations/src/android/test/anno/TestAnnotations.java
+++ b/test/005-annotations/src/android/test/anno/TestAnnotations.java
@@ -42,7 +42,7 @@
}
}
- static void printAnnotations(Class clazz) {
+ static void printAnnotations(Class<?> clazz) {
Annotation[] annos;
Annotation[][] parAnnos;
@@ -52,7 +52,7 @@
printAnnotationArray("", annos);
System.out.println();
- for (Constructor c: clazz.getDeclaredConstructors()) {
+ for (Constructor<?> c: clazz.getDeclaredConstructors()) {
annos = c.getDeclaredAnnotations();
System.out.println(" annotations on CTOR " + c + ":");
printAnnotationArray(" ", annos);
@@ -139,8 +139,7 @@
final IntToString[] mapping;
try {
- meth = TestAnnotations.class.getMethod("getFocusType",
- (Class[])null);
+ meth = TestAnnotations.class.getMethod("getFocusType");
} catch (NoSuchMethodException nsme) {
throw new RuntimeException(nsme);
}
@@ -255,7 +254,7 @@
}
private static class VMRuntime {
- private static Class vmRuntimeClass;
+ private static Class<?> vmRuntimeClass;
private static Method getRuntimeMethod;
private static Method getTargetSdkVersionMethod;
private static Method setTargetSdkVersionMethod;
diff --git a/test/021-string2/src/Main.java b/test/021-string2/src/Main.java
index 0226614..d1ea0b1 100644
--- a/test/021-string2/src/Main.java
+++ b/test/021-string2/src/Main.java
@@ -85,7 +85,7 @@
Assert.assertEquals("this is a path", test.replaceAll("/", " "));
Assert.assertEquals("this is a path", test.replace("/", " "));
- Class Strings = Class.forName("com.android.org.bouncycastle.util.Strings");
+ Class<?> Strings = Class.forName("com.android.org.bouncycastle.util.Strings");
Method fromUTF8ByteArray = Strings.getDeclaredMethod("fromUTF8ByteArray", byte[].class);
String result = (String) fromUTF8ByteArray.invoke(null, new byte[] {'O', 'K'});
System.out.println(result);
diff --git a/test/031-class-attributes/src/ClassAttrs.java b/test/031-class-attributes/src/ClassAttrs.java
index 38bd525..346e13d 100644
--- a/test/031-class-attributes/src/ClassAttrs.java
+++ b/test/031-class-attributes/src/ClassAttrs.java
@@ -118,14 +118,13 @@
printClassAttrs(FancyClass.class);
try {
- Constructor cons;
- cons = MemberClass.class.getConstructor(
- new Class[] { MemberClass.class });
+ Constructor<?> cons;
+ cons = MemberClass.class.getConstructor(MemberClass.class);
System.out.println("constructor signature: "
+ getSignatureAttribute(cons));
Method meth;
- meth = MemberClass.class.getMethod("foo", (Class[]) null);
+ meth = MemberClass.class.getMethod("foo");
System.out.println("method signature: "
+ getSignatureAttribute(meth));
@@ -222,7 +221,7 @@
public static String getSignatureAttribute(Object obj) {
Method method;
try {
- Class c = obj.getClass();
+ Class<?> c = obj.getClass();
if (c == Method.class || c == Constructor.class) {
c = AbstractMethod.class;
}
@@ -263,9 +262,7 @@
/*
* Dump a variety of class attributes.
*/
- public static void printClassAttrs(Class clazz) {
- Class clazz2;
-
+ public static <T> void printClassAttrs(Class<T> clazz) {
System.out.println("***** " + clazz + ":");
System.out.println(" name: "
@@ -321,7 +318,7 @@
System.out.println(" genericInterfaces: "
+ stringifyTypeArray(clazz.getGenericInterfaces()));
- TypeVariable<Class<?>>[] typeParameters = clazz.getTypeParameters();
+ TypeVariable<Class<T>>[] typeParameters = clazz.getTypeParameters();
System.out.println(" typeParameters: "
+ stringifyTypeArray(typeParameters));
}
diff --git a/test/032-concrete-sub/src/ConcreteSub.java b/test/032-concrete-sub/src/ConcreteSub.java
index 083f25d..95adf63 100644
--- a/test/032-concrete-sub/src/ConcreteSub.java
+++ b/test/032-concrete-sub/src/ConcreteSub.java
@@ -37,13 +37,13 @@
/*
* Check reflection stuff.
*/
- Class absClass = AbstractBase.class;
+ Class<?> absClass = AbstractBase.class;
Method meth;
System.out.println("class modifiers=" + absClass.getModifiers());
try {
- meth = absClass.getMethod("redefineMe", (Class[]) null);
+ meth = absClass.getMethod("redefineMe");
} catch (NoSuchMethodException nsme) {
nsme.printStackTrace();
return;
diff --git a/test/042-new-instance/src/Main.java b/test/042-new-instance/src/Main.java
index 8cd6b2e..755d62e 100644
--- a/test/042-new-instance/src/Main.java
+++ b/test/042-new-instance/src/Main.java
@@ -33,7 +33,7 @@
static void testClassNewInstance() {
// should succeed
try {
- Class c = Class.forName("LocalClass");
+ Class<?> c = Class.forName("LocalClass");
Object obj = c.newInstance();
System.out.println("LocalClass succeeded");
} catch (Exception ex) {
@@ -43,7 +43,7 @@
// should fail
try {
- Class c = Class.forName("otherpackage.PackageAccess");
+ Class<?> c = Class.forName("otherpackage.PackageAccess");
Object obj = c.newInstance();
System.err.println("ERROR: PackageAccess succeeded unexpectedly");
} catch (IllegalAccessException iae) {
@@ -71,8 +71,8 @@
static void testConstructorNewInstance() {
// should fail -- getConstructor only returns public constructors
try {
- Class c = Class.forName("LocalClass");
- Constructor cons = c.getConstructor(new Class[0] /*(Class[])null*/);
+ Class<?> c = Class.forName("LocalClass");
+ Constructor<?> cons = c.getConstructor();
System.err.println("Cons LocalClass succeeded unexpectedly");
} catch (NoSuchMethodException nsme) {
System.out.println("Cons LocalClass failed as expected");
@@ -83,8 +83,8 @@
// should succeed
try {
- Class c = Class.forName("LocalClass2");
- Constructor cons = c.getConstructor((Class[]) null);
+ Class<?> c = Class.forName("LocalClass2");
+ Constructor<?> cons = c.getConstructor();
Object obj = cons.newInstance();
System.out.println("Cons LocalClass2 succeeded");
} catch (Exception ex) {
@@ -94,8 +94,8 @@
// should succeed
try {
- Class c = Class.forName("Main$InnerClass");
- Constructor cons = c.getDeclaredConstructor(new Class<?>[]{Main.class});
+ Class<?> c = Class.forName("Main$InnerClass");
+ Constructor<?> cons = c.getDeclaredConstructor(Main.class);
Object obj = cons.newInstance(new Main());
System.out.println("Cons InnerClass succeeded");
} catch (Exception ex) {
@@ -105,8 +105,8 @@
// should succeed
try {
- Class c = Class.forName("Main$StaticInnerClass");
- Constructor cons = c.getDeclaredConstructor((Class[]) null);
+ Class<?> c = Class.forName("Main$StaticInnerClass");
+ Constructor<?> cons = c.getDeclaredConstructor();
Object obj = cons.newInstance();
System.out.println("Cons StaticInnerClass succeeded");
} catch (Exception ex) {
@@ -116,8 +116,8 @@
// should fail
try {
- Class c = Class.forName("otherpackage.PackageAccess");
- Constructor cons = c.getConstructor(new Class[0] /*(Class[])null*/);
+ Class<?> c = Class.forName("otherpackage.PackageAccess");
+ Constructor<?> cons = c.getConstructor();
System.err.println("ERROR: Cons PackageAccess succeeded unexpectedly");
} catch (NoSuchMethodException nsme) {
// constructor isn't public
@@ -129,8 +129,8 @@
// should fail
try {
- Class c = Class.forName("MaybeAbstract");
- Constructor cons = c.getConstructor(new Class[0] /*(Class[])null*/);
+ Class<?> c = Class.forName("MaybeAbstract");
+ Constructor<?> cons = c.getConstructor();
Object obj = cons.newInstance();
System.err.println("ERROR: Cons MaybeAbstract succeeded unexpectedly");
} catch (InstantiationException ie) {
@@ -143,8 +143,8 @@
// should fail
try {
- Class c = Class.forName("otherpackage.PackageAccess2");
- Constructor cons = c.getConstructor((Class[]) null);
+ Class<?> c = Class.forName("otherpackage.PackageAccess2");
+ Constructor<?> cons = c.getConstructor();
if (!FULL_ACCESS_CHECKS) { throw new IllegalAccessException(); }
Object obj = cons.newInstance();
System.err.println("ERROR: Cons PackageAccess2 succeeded unexpectedly");
@@ -197,7 +197,7 @@
static Object newInstance() {
try {
- Class c = CC.class;
+ Class<?> c = CC.class;
return c.newInstance();
} catch (Exception ex) {
ex.printStackTrace();
diff --git a/test/042-new-instance/src/otherpackage/ConstructorAccess.java b/test/042-new-instance/src/otherpackage/ConstructorAccess.java
index a74e9a0..79d572c 100644
--- a/test/042-new-instance/src/otherpackage/ConstructorAccess.java
+++ b/test/042-new-instance/src/otherpackage/ConstructorAccess.java
@@ -29,8 +29,8 @@
// accessibility using the frame below (in Main class), we will see an
// IllegalAccessException from #newInstance
static public void newConstructorInstance() throws Exception {
- Class c = Inner.class;
- Constructor cons = c.getDeclaredConstructor((Class[]) null);
+ Class<?> c = Inner.class;
+ Constructor<?> cons = c.getDeclaredConstructor();
Object obj = cons.newInstance();
}
}
diff --git a/test/044-proxy/src/BasicTest.java b/test/044-proxy/src/BasicTest.java
index 445a6cc..5f04b93 100644
--- a/test/044-proxy/src/BasicTest.java
+++ b/test/044-proxy/src/BasicTest.java
@@ -99,18 +99,16 @@
InvocationHandler handler = new MyInvocationHandler(proxyMe);
/* create the proxy class */
- Class proxyClass = Proxy.getProxyClass(Shapes.class.getClassLoader(),
- new Class[] { Quads.class, Colors.class, Trace.class });
+ Class<?> proxyClass = Proxy.getProxyClass(Shapes.class.getClassLoader(),
+ Quads.class, Colors.class, Trace.class);
Main.registerProxyClassName(proxyClass.getCanonicalName());
/* create a proxy object, passing the handler object in */
Object proxy = null;
try {
- Constructor<Class> cons;
- cons = proxyClass.getConstructor(
- new Class[] { InvocationHandler.class });
+ Constructor<?> cons = proxyClass.getConstructor(InvocationHandler.class);
//System.out.println("Constructor is " + cons);
- proxy = cons.newInstance(new Object[] { handler });
+ proxy = cons.newInstance(handler);
} catch (NoSuchMethodException nsme) {
System.err.println("failed: " + nsme);
} catch (InstantiationException ie) {
diff --git a/test/044-proxy/src/Clash.java b/test/044-proxy/src/Clash.java
index adeffdc..d000112 100644
--- a/test/044-proxy/src/Clash.java
+++ b/test/044-proxy/src/Clash.java
@@ -30,7 +30,7 @@
/* try passing in the same interface twice */
try {
Proxy.newProxyInstance(Clash.class.getClassLoader(),
- new Class[] { Interface1A.class, Interface1A.class },
+ new Class<?>[] { Interface1A.class, Interface1A.class },
handler);
System.err.println("Dupe did not throw expected exception");
} catch (IllegalArgumentException iae) {
@@ -39,7 +39,7 @@
try {
Proxy.newProxyInstance(Clash.class.getClassLoader(),
- new Class[] { Interface1A.class, Interface1B.class },
+ new Class<?>[] { Interface1A.class, Interface1B.class },
handler);
System.err.println("Clash did not throw expected exception");
} catch (IllegalArgumentException iae) {
diff --git a/test/044-proxy/src/Clash2.java b/test/044-proxy/src/Clash2.java
index 2a384f4..e405cfe 100644
--- a/test/044-proxy/src/Clash2.java
+++ b/test/044-proxy/src/Clash2.java
@@ -29,7 +29,7 @@
try {
Proxy.newProxyInstance(Clash.class.getClassLoader(),
- new Class[] { Interface2A.class, Interface2B.class },
+ new Class<?>[] { Interface2A.class, Interface2B.class },
handler);
System.err.println("Clash2 did not throw expected exception");
} catch (IllegalArgumentException iae) {
diff --git a/test/044-proxy/src/Clash3.java b/test/044-proxy/src/Clash3.java
index 6d6f2f2..44806ce 100644
--- a/test/044-proxy/src/Clash3.java
+++ b/test/044-proxy/src/Clash3.java
@@ -29,7 +29,7 @@
try {
Proxy.newProxyInstance(Clash.class.getClassLoader(),
- new Class[] {
+ new Class<?>[] {
Interface3a.class,
Interface3base.class,
Interface3aa.class,
diff --git a/test/044-proxy/src/Clash4.java b/test/044-proxy/src/Clash4.java
index 1bfb37f..ca5c3ab 100644
--- a/test/044-proxy/src/Clash4.java
+++ b/test/044-proxy/src/Clash4.java
@@ -29,7 +29,7 @@
try {
Proxy.newProxyInstance(Clash.class.getClassLoader(),
- new Class[] {
+ new Class<?>[] {
Interface4a.class,
Interface4aa.class,
Interface4base.class,
diff --git a/test/044-proxy/src/FloatSelect.java b/test/044-proxy/src/FloatSelect.java
index febe697..217ccaf 100644
--- a/test/044-proxy/src/FloatSelect.java
+++ b/test/044-proxy/src/FloatSelect.java
@@ -34,7 +34,7 @@
public static void main(String[] args) {
FloatSelectI proxyObject = (FloatSelectI) Proxy.newProxyInstance(
FloatSelectI.class.getClassLoader(),
- new Class[] { FloatSelectI.class },
+ new Class<?>[] { FloatSelectI.class },
new FloatSelectIInvoke1());
float floatResult = proxyObject.method(2.1f, 5.8f);
diff --git a/test/044-proxy/src/NativeProxy.java b/test/044-proxy/src/NativeProxy.java
index b425da8..c609dc2 100644
--- a/test/044-proxy/src/NativeProxy.java
+++ b/test/044-proxy/src/NativeProxy.java
@@ -40,7 +40,7 @@
try {
NativeInterface inf = (NativeInterface)Proxy.newProxyInstance(
NativeProxy.class.getClassLoader(),
- new Class[] { NativeInterface.class },
+ new Class<?>[] { NativeInterface.class },
new NativeInvocationHandler());
nativeCall(inf);
diff --git a/test/044-proxy/src/ReturnsAndArgPassing.java b/test/044-proxy/src/ReturnsAndArgPassing.java
index 225cc5b..3d8ebf0 100644
--- a/test/044-proxy/src/ReturnsAndArgPassing.java
+++ b/test/044-proxy/src/ReturnsAndArgPassing.java
@@ -98,7 +98,7 @@
MyInvocationHandler myHandler = new MyInvocationHandler();
MyInterface proxyMyInterface =
(MyInterface)Proxy.newProxyInstance(ReturnsAndArgPassing.class.getClassLoader(),
- new Class[] { MyInterface.class },
+ new Class<?>[] { MyInterface.class },
myHandler);
check(fooInvocations == 0);
proxyMyInterface.voidFoo();
@@ -441,7 +441,7 @@
MyInvocationHandler myHandler = new MyInvocationHandler();
MyInterface proxyMyInterface =
(MyInterface)Proxy.newProxyInstance(ReturnsAndArgPassing.class.getClassLoader(),
- new Class[] { MyInterface.class },
+ new Class<?>[] { MyInterface.class },
myHandler);
check((Integer)proxyMyInterface.selectArg(0, Integer.MAX_VALUE, Long.MAX_VALUE,
diff --git a/test/044-proxy/src/WrappedThrow.java b/test/044-proxy/src/WrappedThrow.java
index 27ae84e..643ba05 100644
--- a/test/044-proxy/src/WrappedThrow.java
+++ b/test/044-proxy/src/WrappedThrow.java
@@ -32,7 +32,7 @@
try {
proxy = Proxy.newProxyInstance(WrappedThrow.class.getClassLoader(),
- new Class[] { InterfaceW1.class, InterfaceW2.class },
+ new Class<?>[] { InterfaceW1.class, InterfaceW2.class },
handler);
} catch (IllegalArgumentException iae) {
System.out.println("WT init failed");
diff --git a/test/046-reflect/src/Main.java b/test/046-reflect/src/Main.java
index 67a0d11..10dad8d 100644
--- a/test/046-reflect/src/Main.java
+++ b/test/046-reflect/src/Main.java
@@ -32,7 +32,7 @@
public Main(ArrayList<Integer> stuff) {}
void printMethodInfo(Method meth) {
- Class[] params, exceptions;
+ Class<?>[] params, exceptions;
int i;
System.out.println("Method name is " + meth.getName());
@@ -62,7 +62,7 @@
private void showStrings(Target instance)
throws NoSuchFieldException, IllegalAccessException {
- Class target = Target.class;
+ Class<?> target = Target.class;
String one, two, three, four;
Field field = null;
@@ -80,15 +80,15 @@
public static void checkAccess() {
try {
- Class target = otherpackage.Other.class;
+ Class<?> target = otherpackage.Other.class;
Object instance = new otherpackage.Other();
Method meth;
- meth = target.getMethod("publicMethod", (Class[]) null);
+ meth = target.getMethod("publicMethod");
meth.invoke(instance);
try {
- meth = target.getMethod("packageMethod", (Class[]) null);
+ meth = target.getMethod("packageMethod");
System.err.println("succeeded on package-scope method");
} catch (NoSuchMethodException nsme) {
// good
@@ -97,7 +97,7 @@
instance = otherpackage.Other.getInnerClassInstance();
target = instance.getClass();
- meth = target.getMethod("innerMethod", (Class[]) null);
+ meth = target.getMethod("innerMethod");
try {
if (!FULL_ACCESS_CHECKS) { throw new IllegalAccessException(); }
meth.invoke(instance);
@@ -121,26 +121,25 @@
}
public void run() {
- Class target = Target.class;
+ Class<Target> target = Target.class;
Method meth = null;
Field field = null;
boolean excep;
try {
- meth = target.getMethod("myMethod", new Class[] { int.class });
+ meth = target.getMethod("myMethod", int.class);
if (meth.getDeclaringClass() != target)
throw new RuntimeException();
printMethodInfo(meth);
- meth = target.getMethod("myMethod", new Class[] { float.class });
+ meth = target.getMethod("myMethod", float.class);
printMethodInfo(meth);
- meth = target.getMethod("myNoargMethod", (Class[]) null);
+ meth = target.getMethod("myNoargMethod");
printMethodInfo(meth);
- meth = target.getMethod("myMethod",
- new Class[] { String[].class, float.class, char.class });
+ meth = target.getMethod("myMethod", String[].class, float.class, char.class);
printMethodInfo(meth);
Target instance = new Target();
@@ -157,11 +156,11 @@
System.out.println("Result of invoke: " + boxval.intValue());
System.out.println("Calling no-arg void-return method");
- meth = target.getMethod("myNoargMethod", (Class[]) null);
+ meth = target.getMethod("myNoargMethod");
meth.invoke(instance, (Object[]) null);
/* try invoking a method that throws an exception */
- meth = target.getMethod("throwingMethod", (Class[]) null);
+ meth = target.getMethod("throwingMethod");
try {
meth.invoke(instance, (Object[]) null);
System.out.println("GLITCH: didn't throw");
@@ -372,7 +371,7 @@
Target targ;
Object[] args;
- cons = target.getConstructor(new Class[] { int.class,float.class });
+ cons = target.getConstructor(int.class, float.class);
args = new Object[] { new Integer(7), new Float(3.3333) };
System.out.println("cons modifiers=" + cons.getModifiers());
targ = cons.newInstance(args);
@@ -458,7 +457,7 @@
public static void checkClinitForFields() throws Exception {
// Loading a class constant shouldn't run <clinit>.
System.out.println("calling const-class FieldNoisyInitUser.class");
- Class niuClass = FieldNoisyInitUser.class;
+ Class<?> niuClass = FieldNoisyInitUser.class;
System.out.println("called const-class FieldNoisyInitUser.class");
// Getting the declared fields doesn't run <clinit>.
@@ -480,14 +479,14 @@
public static void checkClinitForMethods() throws Exception {
// Loading a class constant shouldn't run <clinit>.
System.out.println("calling const-class MethodNoisyInitUser.class");
- Class niuClass = MethodNoisyInitUser.class;
+ Class<?> niuClass = MethodNoisyInitUser.class;
System.out.println("called const-class MethodNoisyInitUser.class");
// Getting the declared methods doesn't run <clinit>.
Method[] methods = niuClass.getDeclaredMethods();
System.out.println("got methods");
- Method method = niuClass.getMethod("staticMethod", (Class[]) null);
+ Method method = niuClass.getMethod("staticMethod");
System.out.println("got method");
method.invoke(null);
System.out.println("invoked method");
@@ -517,8 +516,7 @@
Method method;
try {
- method = Main.class.getMethod("fancyMethod",
- new Class[] { ArrayList.class });
+ method = Main.class.getMethod("fancyMethod", ArrayList.class);
} catch (NoSuchMethodException nsme) {
throw new RuntimeException(nsme);
}
@@ -527,9 +525,9 @@
System.out.println("generic method " + method.getName() + " params='"
+ stringifyTypeArray(parmTypes) + "' ret='" + ret + "'");
- Constructor ctor;
+ Constructor<?> ctor;
try {
- ctor = Main.class.getConstructor(new Class[] { ArrayList.class });
+ ctor = Main.class.getConstructor(ArrayList.class);
} catch (NoSuchMethodException nsme) {
throw new RuntimeException(nsme);
}
@@ -580,8 +578,8 @@
}
Method method1, method2;
try {
- method1 = Main.class.getMethod("fancyMethod", new Class[] { ArrayList.class });
- method2 = Main.class.getMethod("fancyMethod", new Class[] { ArrayList.class });
+ method1 = Main.class.getMethod("fancyMethod", ArrayList.class);
+ method2 = Main.class.getMethod("fancyMethod", ArrayList.class);
} catch (NoSuchMethodException nsme) {
throw new RuntimeException(nsme);
}
diff --git a/test/064-field-access/src/Main.java b/test/064-field-access/src/Main.java
index 5d90129..50ad5b9 100644
--- a/test/064-field-access/src/Main.java
+++ b/test/064-field-access/src/Main.java
@@ -38,7 +38,7 @@
}
try {
- Class c = Class.forName("SubClassUsingInaccessibleField");
+ Class<?> c = Class.forName("SubClassUsingInaccessibleField");
Object o = c.newInstance();
c.getMethod("test").invoke(o, null);
} catch (InvocationTargetException ite) {
@@ -64,7 +64,7 @@
* On success, the boxed value retrieved is returned.
*/
public Object getValue(Field field, Object obj, char type,
- Class expectedException) {
+ Class<?> expectedException) {
Object result = null;
try {
switch (type) {
@@ -638,7 +638,7 @@
* reflection call is significant]
*/
public Object getValue(Field field, Object obj, char type,
- Class expectedException) {
+ Class<?> expectedException) {
Object result = null;
try {
switch (type) {
@@ -698,7 +698,7 @@
return result;
}
- public Object invoke(Method method, Object obj, Class expectedException) {
+ public Object invoke(Method method, Object obj, Class<?> expectedException) {
Object result = null;
try {
result = method.invoke(obj);
diff --git a/test/068-classloader/src/FancyLoader.java b/test/068-classloader/src/FancyLoader.java
index 6a153cc..e616bfc 100644
--- a/test/068-classloader/src/FancyLoader.java
+++ b/test/068-classloader/src/FancyLoader.java
@@ -41,7 +41,7 @@
static final String DEX_FILE = System.getenv("DEX_LOCATION") + "/068-classloader-ex.jar";
/* on Dalvik, this is a DexFile; otherwise, it's null */
- private Class mDexClass;
+ private Class<?> mDexClass;
private Object mDexFile;
@@ -82,12 +82,12 @@
if (mDexFile == null) {
synchronized (FancyLoader.class) {
- Constructor ctor;
+ Constructor<?> ctor;
/*
* Construct a DexFile object through reflection.
*/
try {
- ctor = mDexClass.getConstructor(new Class[] {String.class});
+ ctor = mDexClass.getConstructor(String.class);
} catch (NoSuchMethodException nsme) {
throw new ClassNotFoundException("getConstructor failed",
nsme);
@@ -111,8 +111,7 @@
Method meth;
try {
- meth = mDexClass.getMethod("loadClass",
- new Class[] { String.class, ClassLoader.class });
+ meth = mDexClass.getMethod("loadClass", String.class, ClassLoader.class);
} catch (NoSuchMethodException nsme) {
throw new ClassNotFoundException("getMethod failed", nsme);
}
@@ -184,7 +183,7 @@
protected Class<?> loadClass(String name, boolean resolve)
throws ClassNotFoundException
{
- Class res;
+ Class<?> res;
/*
* 1. Invoke findLoadedClass(String) to check if the class has
diff --git a/test/068-classloader/src/Main.java b/test/068-classloader/src/Main.java
index b2d843b..01539b7 100644
--- a/test/068-classloader/src/Main.java
+++ b/test/068-classloader/src/Main.java
@@ -74,11 +74,10 @@
/* this is the "alternate" DEX/Jar file */
String DEX_FILE = System.getenv("DEX_LOCATION") + "/068-classloader-ex.jar";
/* on Dalvik, this is a DexFile; otherwise, it's null */
- Class mDexClass = Class.forName("dalvik.system.DexFile");
- Constructor ctor = mDexClass.getConstructor(new Class[] {String.class});
+ Class<?> mDexClass = Class.forName("dalvik.system.DexFile");
+ Constructor<?> ctor = mDexClass.getConstructor(String.class);
Object mDexFile = ctor.newInstance(DEX_FILE);
- Method meth = mDexClass.getMethod("loadClass",
- new Class[] { String.class, ClassLoader.class });
+ Method meth = mDexClass.getMethod("loadClass", String.class, ClassLoader.class);
Object klass = meth.invoke(mDexFile, "Mutator", null);
if (klass == null) {
throw new AssertionError("loadClass with nullclass loader failed");
@@ -94,15 +93,15 @@
FancyLoader loader2 = new FancyLoader(ClassLoader.getSystemClassLoader());
try {
- Class target1 = loader1.loadClass("MutationTarget");
- Class target2 = loader2.loadClass("MutationTarget");
+ Class<?> target1 = loader1.loadClass("MutationTarget");
+ Class<?> target2 = loader2.loadClass("MutationTarget");
if (target1 == target2) {
throw new RuntimeException("target1 should not be equal to target2");
}
- Class mutator1 = loader1.loadClass("Mutator");
- Class mutator2 = loader2.loadClass("Mutator");
+ Class<?> mutator1 = loader1.loadClass("Mutator");
+ Class<?> mutator2 = loader2.loadClass("Mutator");
if (mutator1 == mutator2) {
throw new RuntimeException("mutator1 should not be equal to mutator2");
@@ -134,12 +133,12 @@
}
}
- private static void runMutator(Class c, int v) throws Exception {
+ private static void runMutator(Class<?> c, int v) throws Exception {
java.lang.reflect.Method m = c.getDeclaredMethod("mutate", int.class);
m.invoke(null, v);
}
- private static int getMutationTargetValue(Class c) throws Exception {
+ private static int getMutationTargetValue(Class<?> c) throws Exception {
java.lang.reflect.Field f = c.getDeclaredField("value");
return f.getInt(null);
}
@@ -149,7 +148,7 @@
* able to load it but not instantiate it.
*/
static void testAccess1(ClassLoader loader) {
- Class altClass;
+ Class<?> altClass;
try {
altClass = loader.loadClass("Inaccessible1");
@@ -179,7 +178,7 @@
* (though the base *is* accessible to us).
*/
static void testAccess2(ClassLoader loader) {
- Class altClass;
+ Class<?> altClass;
try {
altClass = loader.loadClass("Inaccessible2");
@@ -199,7 +198,7 @@
* See if we can load a class with an inaccessible interface.
*/
static void testAccess3(ClassLoader loader) {
- Class altClass;
+ Class<?> altClass;
try {
altClass = loader.loadClass("Inaccessible3");
@@ -219,7 +218,7 @@
* Test a doubled class that extends the base class.
*/
static void testExtend(ClassLoader loader) {
- Class doubledExtendClass;
+ Class<?> doubledExtendClass;
Object obj;
/* get the "alternate" version of DoubledExtend */
@@ -268,7 +267,7 @@
* it doesn't override the base class method.
*/
static void testExtendOkay(ClassLoader loader) {
- Class doubledExtendOkayClass;
+ Class<?> doubledExtendOkayClass;
Object obj;
/* get the "alternate" version of DoubledExtendOkay */
@@ -316,7 +315,7 @@
* an interface declared in a different class.
*/
static void testInterface(ClassLoader loader) {
- Class getDoubledClass;
+ Class<?> getDoubledClass;
Object obj;
/* get GetDoubled from the "alternate" class loader */
@@ -362,7 +361,7 @@
* Throw an abstract class into the middle and see what happens.
*/
static void testAbstract(ClassLoader loader) {
- Class abstractGetClass;
+ Class<?> abstractGetClass;
Object obj;
/* get AbstractGet from the "alternate" loader */
@@ -407,7 +406,7 @@
* Test a doubled class that implements a common interface.
*/
static void testImplement(ClassLoader loader) {
- Class doubledImplementClass;
+ Class<?> doubledImplementClass;
Object obj;
useImplement(new DoubledImplement(), true);
@@ -465,7 +464,7 @@
* that refers to a doubled class.
*/
static void testIfaceImplement(ClassLoader loader) {
- Class ifaceImplClass;
+ Class<?> ifaceImplClass;
Object obj;
/*
diff --git a/test/071-dexfile/src/Main.java b/test/071-dexfile/src/Main.java
index 2f85790..c3a9671 100644
--- a/test/071-dexfile/src/Main.java
+++ b/test/071-dexfile/src/Main.java
@@ -66,7 +66,7 @@
*/
private static void testDexClassLoader() throws Exception {
ClassLoader dexClassLoader = getDexClassLoader();
- Class Another = dexClassLoader.loadClass("Another");
+ Class<?> Another = dexClassLoader.loadClass("Another");
Object another = Another.newInstance();
// not expected to work; just exercises the call
dexClassLoader.getResource("nonexistent");
@@ -79,18 +79,21 @@
*/
private static ClassLoader getDexClassLoader() throws Exception {
ClassLoader classLoader = Main.class.getClassLoader();
- Class DexClassLoader = classLoader.loadClass("dalvik.system.DexClassLoader");
- Constructor DexClassLoader_init = DexClassLoader.getConstructor(String.class,
- String.class,
- String.class,
- ClassLoader.class);
+ Class<?> DexClassLoader = classLoader.loadClass("dalvik.system.DexClassLoader");
+ Constructor<?> DexClassLoader_init = DexClassLoader.getConstructor(String.class,
+ String.class,
+ String.class,
+ ClassLoader.class);
// create an instance, using the path we found
- return (ClassLoader) DexClassLoader_init.newInstance(CLASS_PATH, getOdexDir(), LIB_DIR, classLoader);
+ return (ClassLoader) DexClassLoader_init.newInstance(CLASS_PATH,
+ getOdexDir(),
+ LIB_DIR,
+ classLoader);
}
private static void testDexFile() throws Exception {
ClassLoader classLoader = Main.class.getClassLoader();
- Class DexFile = classLoader.loadClass("dalvik.system.DexFile");
+ Class<?> DexFile = classLoader.loadClass("dalvik.system.DexFile");
Method DexFile_loadDex = DexFile.getMethod("loadDex",
String.class,
String.class,
diff --git a/test/074-gc-thrash/src/Main.java b/test/074-gc-thrash/src/Main.java
index f947d0b..df04793 100644
--- a/test/074-gc-thrash/src/Main.java
+++ b/test/074-gc-thrash/src/Main.java
@@ -69,7 +69,7 @@
*/
private static Method getDumpHprofDataMethod() {
ClassLoader myLoader = Main.class.getClassLoader();
- Class vmdClass;
+ Class<?> vmdClass;
try {
vmdClass = myLoader.loadClass("dalvik.system.VMDebug");
} catch (ClassNotFoundException cnfe) {
@@ -78,8 +78,7 @@
Method meth;
try {
- meth = vmdClass.getMethod("dumpHprofData",
- new Class[] { String.class });
+ meth = vmdClass.getMethod("dumpHprofData", String.class);
} catch (NoSuchMethodException nsme) {
System.err.println("Found VMDebug but not dumpHprofData method");
return null;
diff --git a/test/080-oom-throw/src/Main.java b/test/080-oom-throw/src/Main.java
index f007b25..0ae92a9 100644
--- a/test/080-oom-throw/src/Main.java
+++ b/test/080-oom-throw/src/Main.java
@@ -105,7 +105,7 @@
static boolean triggerReflectionOOM() {
try {
Class<?> c = Main.class;
- Method m = c.getMethod("blowup", (Class[]) null);
+ Method m = c.getMethod("blowup");
holder = new Object[1000000];
m.invoke(null);
holder = null;
diff --git a/test/086-null-super/src/Main.java b/test/086-null-super/src/Main.java
index 060737f..8bd1786 100644
--- a/test/086-null-super/src/Main.java
+++ b/test/086-null-super/src/Main.java
@@ -75,14 +75,12 @@
* Find the DexFile class, and construct a DexFile object
* through reflection, then call loadCLass on it.
*/
- Class mDexClass = ClassLoader.getSystemClassLoader().
+ Class<?> mDexClass = ClassLoader.getSystemClassLoader().
loadClass("dalvik.system.DexFile");
- Constructor ctor = mDexClass.
- getConstructor(new Class[] {String.class});
+ Constructor<?> ctor = mDexClass.getConstructor(String.class);
Object mDexFile = ctor.newInstance(DEX_FILE);
Method meth = mDexClass.
- getMethod("loadClass",
- new Class[] { String.class, ClassLoader.class });
+ getMethod("loadClass", String.class, ClassLoader.class);
/*
* Invoking loadClass on CLASS_NAME is expected to
* throw an InvocationTargetException. Anything else
diff --git a/test/087-gc-after-link/src/Main.java b/test/087-gc-after-link/src/Main.java
index 7c47e99..698af0b 100644
--- a/test/087-gc-after-link/src/Main.java
+++ b/test/087-gc-after-link/src/Main.java
@@ -70,7 +70,7 @@
throws TestFailed, InvocationTargetException
{
Object dexFile = null;
- Class dexClass = null;
+ Class<?> dexClass = null;
try {
try {
@@ -80,11 +80,9 @@
*/
dexClass = ClassLoader.getSystemClassLoader().
loadClass("dalvik.system.DexFile");
- Constructor ctor = dexClass.
- getConstructor(new Class[] {String.class});
+ Constructor<?> ctor = dexClass.getConstructor(String.class);
dexFile = ctor.newInstance(DEX_FILE);
- Method meth = dexClass.getMethod("loadClass",
- new Class[] { String.class, ClassLoader.class });
+ Method meth = dexClass.getMethod("loadClass", String.class, ClassLoader.class);
/*
* Invoking loadClass on CLASS_NAME is expected to
* throw an InvocationTargetException. Anything else
@@ -95,7 +93,7 @@
} finally {
if (dexFile != null) {
/* close the DexFile to make CloseGuard happy */
- Method meth = dexClass.getMethod("close", (Class[]) null);
+ Method meth = dexClass.getMethod("close");
meth.invoke(dexFile);
}
}
diff --git a/test/088-monitor-verification/src/Main.java b/test/088-monitor-verification/src/Main.java
index 212c894..a6f0e64 100644
--- a/test/088-monitor-verification/src/Main.java
+++ b/test/088-monitor-verification/src/Main.java
@@ -100,7 +100,7 @@
*/
void constantLock() {
assertIsManaged();
- Class thing = Thread.class;
+ Class<?> thing = Thread.class;
synchronized (Thread.class) {}
}
diff --git a/test/098-ddmc/src/Main.java b/test/098-ddmc/src/Main.java
index 50bbe51..72c5a28 100644
--- a/test/098-ddmc/src/Main.java
+++ b/test/098-ddmc/src/Main.java
@@ -136,7 +136,7 @@
private static final Method getRecentAllocationsMethod;
static {
try {
- Class c = Class.forName("org.apache.harmony.dalvik.ddmc.DdmVmInternal");
+ Class<?> c = Class.forName("org.apache.harmony.dalvik.ddmc.DdmVmInternal");
enableRecentAllocationsMethod = c.getDeclaredMethod("enableRecentAllocations",
Boolean.TYPE);
getRecentAllocationStatusMethod = c.getDeclaredMethod("getRecentAllocationStatus");
diff --git a/test/099-vmdebug/src/Main.java b/test/099-vmdebug/src/Main.java
index 8068721..90ad315 100644
--- a/test/099-vmdebug/src/Main.java
+++ b/test/099-vmdebug/src/Main.java
@@ -242,7 +242,7 @@
System.out.println("Instances of null " + VMDebug.countInstancesofClass(null, false));
System.out.println("Instances of ClassA assignable " +
VMDebug.countInstancesofClass(ClassA.class, true));
- Class[] classes = new Class[]{ClassA.class, ClassB.class, null};
+ Class<?>[] classes = new Class<?>[] {ClassA.class, ClassB.class, null};
long[] counts = VMDebug.countInstancesofClasses(classes, false);
System.out.println("Array counts " + Arrays.toString(counts));
counts = VMDebug.countInstancesofClasses(classes, true);
@@ -259,7 +259,7 @@
private static final Method countInstancesOfClassesMethod;
static {
try {
- Class c = Class.forName("dalvik.system.VMDebug");
+ Class<?> c = Class.forName("dalvik.system.VMDebug");
startMethodTracingMethod = c.getDeclaredMethod("startMethodTracing", String.class,
Integer.TYPE, Integer.TYPE, Boolean.TYPE, Integer.TYPE);
stopMethodTracingMethod = c.getDeclaredMethod("stopMethodTracing");
@@ -292,10 +292,10 @@
public static Map<String, String> getRuntimeStats() throws Exception {
return (Map<String, String>) getRuntimeStatsMethod.invoke(null);
}
- public static long countInstancesofClass(Class c, boolean assignable) throws Exception {
+ public static long countInstancesofClass(Class<?> c, boolean assignable) throws Exception {
return (long) countInstancesOfClassMethod.invoke(null, new Object[]{c, assignable});
}
- public static long[] countInstancesofClasses(Class[] classes, boolean assignable)
+ public static long[] countInstancesofClasses(Class<?>[] classes, boolean assignable)
throws Exception {
return (long[]) countInstancesOfClassesMethod.invoke(
null, new Object[]{classes, assignable});
diff --git a/test/100-reflect2/src/Main.java b/test/100-reflect2/src/Main.java
index 1245852..91ba307 100644
--- a/test/100-reflect2/src/Main.java
+++ b/test/100-reflect2/src/Main.java
@@ -275,10 +275,8 @@
}
public static void testConstructorReflection() throws Exception {
- Constructor<?> ctor;
-
- ctor = String.class.getConstructor(new Class[0]);
- show(ctor.newInstance((Object[]) null));
+ Constructor<String> ctor = String.class.getConstructor();
+ show(ctor.newInstance());
ctor = String.class.getConstructor(char[].class, int.class, int.class);
show(ctor.newInstance(new char[] { '\u2714', 'y', 'z', '!' }, 1, 2));
@@ -287,7 +285,7 @@
private static void testPackagePrivateConstructor() {
try {
Class<?> c = Class.forName("sub.PPClass");
- Constructor cons = c.getConstructor();
+ Constructor<?> cons = c.getConstructor();
cons.newInstance();
throw new RuntimeException("Expected IllegalAccessException.");
} catch (IllegalAccessException e) {
@@ -301,7 +299,7 @@
private static void testPackagePrivateAccessibleConstructor() {
try {
Class<?> c = Class.forName("sub.PPClass");
- Constructor cons = c.getConstructor();
+ Constructor<?> cons = c.getConstructor();
cons.setAccessible(true); // ensure we prevent IllegalAccessException
cons.newInstance();
} catch (Exception e) {
diff --git a/test/107-int-math2/src/Main.java b/test/107-int-math2/src/Main.java
index 0c91d44..ec5678d 100644
--- a/test/107-int-math2/src/Main.java
+++ b/test/107-int-math2/src/Main.java
@@ -104,7 +104,7 @@
}
static int constClassTest(int x) {
- Class c = String.class;
+ Class<?> c = String.class;
if (c != null) {
return x * 2;
} else {
diff --git a/test/118-noimage-dex2oat/src/Main.java b/test/118-noimage-dex2oat/src/Main.java
index dba9166..cc19107 100644
--- a/test/118-noimage-dex2oat/src/Main.java
+++ b/test/118-noimage-dex2oat/src/Main.java
@@ -51,7 +51,7 @@
private static final Method isBootClassPathOnDiskMethod;
static {
try {
- Class c = Class.forName("dalvik.system.VMRuntime");
+ Class<?> c = Class.forName("dalvik.system.VMRuntime");
getCurrentInstructionSetMethod = c.getDeclaredMethod("getCurrentInstructionSet");
isBootClassPathOnDiskMethod = c.getDeclaredMethod("isBootClassPathOnDisk",
String.class);
diff --git a/test/125-gc-and-classloading/src/Main.java b/test/125-gc-and-classloading/src/Main.java
index 61e123d..e81ef7b 100644
--- a/test/125-gc-and-classloading/src/Main.java
+++ b/test/125-gc-and-classloading/src/Main.java
@@ -57,7 +57,7 @@
public void run() {
try {
cdl.await();
- Class c0 = Class.forName("Main$BigClass");
+ Class<?> c0 = Class.forName("Main$BigClass");
} catch (Exception e) {
throw new RuntimeException(e);
}
diff --git a/test/130-hprof/src/Main.java b/test/130-hprof/src/Main.java
index 9868c61..c145f27 100644
--- a/test/130-hprof/src/Main.java
+++ b/test/130-hprof/src/Main.java
@@ -37,15 +37,15 @@
private static Object allocInDifferentLoader() throws Exception {
final String DEX_FILE = System.getenv("DEX_LOCATION") + "/130-hprof-ex.jar";
- Class pathClassLoader = Class.forName("dalvik.system.PathClassLoader");
+ Class<?> pathClassLoader = Class.forName("dalvik.system.PathClassLoader");
if (pathClassLoader == null) {
throw new AssertionError("Couldn't find path class loader class");
}
- Constructor constructor =
+ Constructor<?> constructor =
pathClassLoader.getDeclaredConstructor(String.class, ClassLoader.class);
ClassLoader loader = (ClassLoader)constructor.newInstance(
DEX_FILE, ClassLoader.getSystemClassLoader());
- Class allocator = loader.loadClass("Allocator");
+ Class<?> allocator = loader.loadClass("Allocator");
return allocator.getDeclaredMethod("allocObject", null).invoke(null);
}
@@ -105,7 +105,7 @@
System.out.println("Generated data.");
createDumpAndConv();
- Class klass = Class.forName("org.apache.harmony.dalvik.ddmc.DdmVmInternal");
+ Class<?> klass = Class.forName("org.apache.harmony.dalvik.ddmc.DdmVmInternal");
if (klass == null) {
throw new AssertionError("Couldn't find path class loader class");
}
@@ -153,7 +153,7 @@
*/
private static Method getDumpHprofDataMethod() {
ClassLoader myLoader = Main.class.getClassLoader();
- Class vmdClass;
+ Class<?> vmdClass;
try {
vmdClass = myLoader.loadClass("dalvik.system.VMDebug");
} catch (ClassNotFoundException cnfe) {
@@ -162,8 +162,7 @@
Method meth;
try {
- meth = vmdClass.getMethod("dumpHprofData",
- new Class[] { String.class });
+ meth = vmdClass.getMethod("dumpHprofData", String.class);
} catch (NoSuchMethodException nsme) {
System.err.println("Found VMDebug but not dumpHprofData method");
return null;
diff --git a/test/134-reg-promotion/src/Main.java b/test/134-reg-promotion/src/Main.java
index 008ac58..f633524 100644
--- a/test/134-reg-promotion/src/Main.java
+++ b/test/134-reg-promotion/src/Main.java
@@ -32,13 +32,13 @@
public static void main(String args[]) throws Exception {
Class<?> c = Class.forName("Test");
- Method m = c.getMethod("run", (Class[]) null);
+ Method m = c.getMethod("run");
for (int i = 0; i < 10; i++) {
holder = new char[128 * 1024][];
m.invoke(null, (Object[]) null);
holder = null;
}
- m = c.getMethod("run2", (Class[]) null);
+ m = c.getMethod("run2");
for (int i = 0; i < 10; i++) {
holder = new char[128 * 1024][];
m.invoke(null, (Object[]) null);
diff --git a/test/138-duplicate-classes-check/src/Main.java b/test/138-duplicate-classes-check/src/Main.java
index a2ef281..5ffceb9 100644
--- a/test/138-duplicate-classes-check/src/Main.java
+++ b/test/138-duplicate-classes-check/src/Main.java
@@ -38,7 +38,7 @@
getClass().getClassLoader());
try {
- Class testEx = loader.loadClass("TestEx");
+ Class<?> testEx = loader.loadClass("TestEx");
Method test = testEx.getDeclaredMethod("test");
test.invoke(null);
} catch (Exception exc) {
diff --git a/test/138-duplicate-classes-check2/src/FancyLoader.java b/test/138-duplicate-classes-check2/src/FancyLoader.java
index 7e2bb08..58b7ec4 100644
--- a/test/138-duplicate-classes-check2/src/FancyLoader.java
+++ b/test/138-duplicate-classes-check2/src/FancyLoader.java
@@ -42,7 +42,7 @@
"/138-duplicate-classes-check2-ex.jar";
/* on Dalvik, this is a DexFile; otherwise, it's null */
- private Class mDexClass;
+ private Class<?> mDexClass;
private Object mDexFile;
@@ -83,12 +83,12 @@
if (mDexFile == null) {
synchronized (FancyLoader.class) {
- Constructor ctor;
+ Constructor<?> ctor;
/*
* Construct a DexFile object through reflection.
*/
try {
- ctor = mDexClass.getConstructor(new Class[] {String.class});
+ ctor = mDexClass.getConstructor(String.class);
} catch (NoSuchMethodException nsme) {
throw new ClassNotFoundException("getConstructor failed",
nsme);
@@ -112,8 +112,7 @@
Method meth;
try {
- meth = mDexClass.getMethod("loadClass",
- new Class[] { String.class, ClassLoader.class });
+ meth = mDexClass.getMethod("loadClass", String.class, ClassLoader.class);
} catch (NoSuchMethodException nsme) {
throw new ClassNotFoundException("getMethod failed", nsme);
}
@@ -185,7 +184,7 @@
protected Class<?> loadClass(String name, boolean resolve)
throws ClassNotFoundException
{
- Class res;
+ Class<?> res;
/*
* 1. Invoke findLoadedClass(String) to check if the class has
diff --git a/test/138-duplicate-classes-check2/src/Main.java b/test/138-duplicate-classes-check2/src/Main.java
index a9b5bb0..a0d6977 100644
--- a/test/138-duplicate-classes-check2/src/Main.java
+++ b/test/138-duplicate-classes-check2/src/Main.java
@@ -33,7 +33,7 @@
FancyLoader loader = new FancyLoader(getClass().getClassLoader());
try {
- Class testEx = loader.loadClass("TestEx");
+ Class<?> testEx = loader.loadClass("TestEx");
Method test = testEx.getDeclaredMethod("test");
test.invoke(null);
} catch (Exception exc) {
diff --git a/test/139-register-natives/src/Main.java b/test/139-register-natives/src/Main.java
index 8dd2131..11bd53f 100644
--- a/test/139-register-natives/src/Main.java
+++ b/test/139-register-natives/src/Main.java
@@ -47,7 +47,7 @@
}
}
- private native static int registerNatives(Class c);
+ private native static int registerNatives(Class<?> c);
private static void expectThrows(Base b) {
try {
diff --git a/test/141-class-unload/src/Main.java b/test/141-class-unload/src/Main.java
index 9ed8d28..f9b6180 100644
--- a/test/141-class-unload/src/Main.java
+++ b/test/141-class-unload/src/Main.java
@@ -28,11 +28,11 @@
public static void main(String[] args) throws Exception {
nativeLibraryName = args[0];
- Class pathClassLoader = Class.forName("dalvik.system.PathClassLoader");
+ Class<?> pathClassLoader = Class.forName("dalvik.system.PathClassLoader");
if (pathClassLoader == null) {
throw new AssertionError("Couldn't find path class loader class");
}
- Constructor constructor =
+ Constructor<?> constructor =
pathClassLoader.getDeclaredConstructor(String.class, String.class, ClassLoader.class);
try {
testUnloadClass(constructor);
@@ -67,7 +67,7 @@
System.out.println("Number of loaded unload-ex maps " + count);
}
- private static void stressTest(Constructor constructor) throws Exception {
+ private static void stressTest(Constructor<?> constructor) throws Exception {
for (int i = 0; i <= 100; ++i) {
setUpUnloadLoader(constructor, false);
if (i % 10 == 0) {
@@ -76,7 +76,7 @@
}
}
- private static void testUnloadClass(Constructor constructor) throws Exception {
+ private static void testUnloadClass(Constructor<?> constructor) throws Exception {
WeakReference<Class> klass = setUpUnloadClassWeak(constructor);
// No strong references to class loader, should get unloaded.
Runtime.getRuntime().gc();
@@ -87,7 +87,7 @@
System.out.println(klass2.get());
}
- private static void testUnloadLoader(Constructor constructor)
+ private static void testUnloadLoader(Constructor<?> constructor)
throws Exception {
WeakReference<ClassLoader> loader = setUpUnloadLoader(constructor, true);
// No strong references to class loader, should get unloaded.
@@ -96,8 +96,8 @@
System.out.println(loader.get());
}
- private static void testStackTrace(Constructor constructor) throws Exception {
- Class klass = setUpUnloadClass(constructor);
+ private static void testStackTrace(Constructor<?> constructor) throws Exception {
+ Class<?> klass = setUpUnloadClass(constructor);
WeakReference<Class> weak_klass = new WeakReference(klass);
Method stackTraceMethod = klass.getDeclaredMethod("generateStackTrace");
Throwable throwable = (Throwable) stackTraceMethod.invoke(klass);
@@ -108,7 +108,7 @@
System.out.println("class null " + isNull + " " + throwable.getMessage());
}
- private static void testLoadAndUnloadLibrary(Constructor constructor) throws Exception {
+ private static void testLoadAndUnloadLibrary(Constructor<?> constructor) throws Exception {
WeakReference<ClassLoader> loader = setUpLoadLibrary(constructor);
// No strong references to class loader, should get unloaded.
Runtime.getRuntime().gc();
@@ -117,7 +117,7 @@
}
private static Object testNoUnloadHelper(ClassLoader loader) throws Exception {
- Class intHolder = loader.loadClass("IntHolder");
+ Class<?> intHolder = loader.loadClass("IntHolder");
return intHolder.newInstance();
}
@@ -131,14 +131,14 @@
public WeakReference<ClassLoader> classLoader;
}
- private static Pair testNoUnloadInstanceHelper(Constructor constructor) throws Exception {
+ private static Pair testNoUnloadInstanceHelper(Constructor<?> constructor) throws Exception {
ClassLoader loader = (ClassLoader) constructor.newInstance(
DEX_FILE, LIBRARY_SEARCH_PATH, ClassLoader.getSystemClassLoader());
Object o = testNoUnloadHelper(loader);
return new Pair(o, loader);
}
- private static void testNoUnloadInstance(Constructor constructor) throws Exception {
+ private static void testNoUnloadInstance(Constructor<?> constructor) throws Exception {
Pair p = testNoUnloadInstanceHelper(constructor);
Runtime.getRuntime().gc();
// If the class loader was unloaded too early due to races, just pass the test.
@@ -146,10 +146,10 @@
System.out.println("loader null " + isNull);
}
- private static Class setUpUnloadClass(Constructor constructor) throws Exception {
+ private static Class<?> setUpUnloadClass(Constructor<?> constructor) throws Exception {
ClassLoader loader = (ClassLoader) constructor.newInstance(
DEX_FILE, LIBRARY_SEARCH_PATH, ClassLoader.getSystemClassLoader());
- Class intHolder = loader.loadClass("IntHolder");
+ Class<?> intHolder = loader.loadClass("IntHolder");
Method getValue = intHolder.getDeclaredMethod("getValue");
Method setValue = intHolder.getDeclaredMethod("setValue", Integer.TYPE);
// Make sure we don't accidentally preserve the value in the int holder, the class
@@ -161,17 +161,17 @@
return intHolder;
}
- private static WeakReference<Class> setUpUnloadClassWeak(Constructor constructor)
+ private static WeakReference<Class> setUpUnloadClassWeak(Constructor<?> constructor)
throws Exception {
return new WeakReference<Class>(setUpUnloadClass(constructor));
}
- private static WeakReference<ClassLoader> setUpUnloadLoader(Constructor constructor,
+ private static WeakReference<ClassLoader> setUpUnloadLoader(Constructor<?> constructor,
boolean waitForCompilation)
throws Exception {
ClassLoader loader = (ClassLoader) constructor.newInstance(
DEX_FILE, LIBRARY_SEARCH_PATH, ClassLoader.getSystemClassLoader());
- Class intHolder = loader.loadClass("IntHolder");
+ Class<?> intHolder = loader.loadClass("IntHolder");
Method setValue = intHolder.getDeclaredMethod("setValue", Integer.TYPE);
setValue.invoke(intHolder, 2);
if (waitForCompilation) {
@@ -180,7 +180,7 @@
return new WeakReference(loader);
}
- private static void waitForCompilation(Class intHolder) throws Exception {
+ private static void waitForCompilation(Class<?> intHolder) throws Exception {
// Load the native library so that we can call waitForCompilation.
Method loadLibrary = intHolder.getDeclaredMethod("loadLibrary", String.class);
loadLibrary.invoke(intHolder, nativeLibraryName);
@@ -189,11 +189,11 @@
waitForCompilation.invoke(intHolder);
}
- private static WeakReference<ClassLoader> setUpLoadLibrary(Constructor constructor)
+ private static WeakReference<ClassLoader> setUpLoadLibrary(Constructor<?> constructor)
throws Exception {
ClassLoader loader = (ClassLoader) constructor.newInstance(
DEX_FILE, LIBRARY_SEARCH_PATH, ClassLoader.getSystemClassLoader());
- Class intHolder = loader.loadClass("IntHolder");
+ Class<?> intHolder = loader.loadClass("IntHolder");
Method loadLibrary = intHolder.getDeclaredMethod("loadLibrary", String.class);
loadLibrary.invoke(intHolder, nativeLibraryName);
waitForCompilation(intHolder);
diff --git a/test/142-classloader2/src/Main.java b/test/142-classloader2/src/Main.java
index 89dadce..80b00e7 100644
--- a/test/142-classloader2/src/Main.java
+++ b/test/142-classloader2/src/Main.java
@@ -25,8 +25,8 @@
private static ClassLoader createClassLoader(String dexPath, ClassLoader parent) {
try {
Class<?> myClassLoaderClass = Class.forName("MyPathClassLoader");
- Constructor constructor = myClassLoaderClass.getConstructor(String.class,
- ClassLoader.class);
+ Constructor<?> constructor = myClassLoaderClass.getConstructor(String.class,
+ ClassLoader.class);
return (ClassLoader)constructor.newInstance(dexPath, parent);
} catch (Exception e) {
// Oops, not available?!?!
diff --git a/test/145-alloc-tracking-stress/src/Main.java b/test/145-alloc-tracking-stress/src/Main.java
index 752fdd9..4a67a80 100644
--- a/test/145-alloc-tracking-stress/src/Main.java
+++ b/test/145-alloc-tracking-stress/src/Main.java
@@ -31,7 +31,7 @@
}
public static void main(String[] args) throws Exception {
- Class klass = Class.forName("org.apache.harmony.dalvik.ddmc.DdmVmInternal");
+ Class<?> klass = Class.forName("org.apache.harmony.dalvik.ddmc.DdmVmInternal");
if (klass == null) {
throw new AssertionError("Couldn't find DdmVmInternal class");
}
diff --git a/test/148-multithread-gc-annotations/src/AnnoClass1.java b/test/148-multithread-gc-annotations/src/AnnoClass1.java
index b82c61f..3eb45ae 100644
--- a/test/148-multithread-gc-annotations/src/AnnoClass1.java
+++ b/test/148-multithread-gc-annotations/src/AnnoClass1.java
@@ -19,5 +19,5 @@
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface AnnoClass1 {
- Class value();
+ Class<?> value();
}
diff --git a/test/148-multithread-gc-annotations/src/AnnoClass2.java b/test/148-multithread-gc-annotations/src/AnnoClass2.java
index c75d950..b17490f 100644
--- a/test/148-multithread-gc-annotations/src/AnnoClass2.java
+++ b/test/148-multithread-gc-annotations/src/AnnoClass2.java
@@ -19,5 +19,5 @@
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface AnnoClass2 {
- Class value();
+ Class<?> value();
}
diff --git a/test/148-multithread-gc-annotations/src/AnnoClass3.java b/test/148-multithread-gc-annotations/src/AnnoClass3.java
index 5b4a378..7d600a8 100644
--- a/test/148-multithread-gc-annotations/src/AnnoClass3.java
+++ b/test/148-multithread-gc-annotations/src/AnnoClass3.java
@@ -19,5 +19,5 @@
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface AnnoClass3 {
- Class value();
+ Class<?> value();
}
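
The three annotation changes above parameterize the Class-valued element the same way. A minimal sketch, with a hypothetical Holds annotation standing in for AnnoClass1-3, of how such an element is declared and read back without an unchecked cast:

    import java.lang.annotation.*;

    @Retention(RetentionPolicy.RUNTIME)
    @Target(ElementType.TYPE)
    @interface Holds {
        Class<?> value();  // wildcard element type, as in AnnoClass1-3
    }

    @Holds(String.class)
    class Annotated {
        public static void main(String[] args) {
            // Reading the element back yields a Class<?> directly.
            Class<?> held = Annotated.class.getAnnotation(Holds.class).value();
            System.out.println(held.getName());  // prints "java.lang.String"
        }
    }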
diff --git a/test/201-built-in-exception-detail-messages/src/Main.java b/test/201-built-in-exception-detail-messages/src/Main.java
index f0bb6dd..dc58819 100644
--- a/test/201-built-in-exception-detail-messages/src/Main.java
+++ b/test/201-built-in-exception-detail-messages/src/Main.java
@@ -247,7 +247,7 @@
* Helper for testCastOperatorWithArrays. It's important that
* the return type is Object.
*/
- private static Object makeArray(Class c) {
+ private static Object makeArray(Class<?> c) {
return Array.newInstance(c, 1);
}
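
The wildcard also matches Array.newInstance's own signature, which takes a Class<?> component type and returns Object. A small runnable sketch of the helper above (class name hypothetical):

    import java.lang.reflect.Array;

    class ArraySketch {
        // Array.newInstance(Class<?>, int) returns Object, so the wildcard
        // parameter lines up with the API with no raw types involved.
        private static Object makeArray(Class<?> c) {
            return Array.newInstance(c, 1);
        }

        public static void main(String[] args) {
            Object arr = makeArray(int.class);
            System.out.println(arr.getClass().isArray());  // true: an int[1]
        }
    }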
diff --git a/test/420-const-class/src/Main.java b/test/420-const-class/src/Main.java
index 44a7436..90ccf3a 100644
--- a/test/420-const-class/src/Main.java
+++ b/test/420-const-class/src/Main.java
@@ -53,15 +53,15 @@
$opt$LoadAndClinitCheck();
}
- public static Class $opt$LoadThisClass() {
+ public static Class<?> $opt$LoadThisClass() {
return Main.class;
}
- public static Class $opt$LoadOtherClass() {
+ public static Class<?> $opt$LoadOtherClass() {
return Other.class;
}
- public static Class $opt$LoadSystemClass() {
+ public static Class<?> $opt$LoadSystemClass() {
return System.class;
}
diff --git a/test/442-checker-constant-folding/src/Main.java b/test/442-checker-constant-folding/src/Main.java
index b7712a7..33ef10b 100644
--- a/test/442-checker-constant-folding/src/Main.java
+++ b/test/442-checker-constant-folding/src/Main.java
@@ -213,17 +213,17 @@
* Exercise constant folding on addition.
*/
- /// CHECK-START: int Main.IntAddition1() constant_folding_after_inlining (before)
+ /// CHECK-START: int Main.IntAddition1() constant_folding$after_inlining (before)
/// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
/// CHECK-DAG: <<Const2:i\d+>> IntConstant 2
/// CHECK-DAG: <<Add:i\d+>> Add [<<Const1>>,<<Const2>>]
/// CHECK-DAG: Return [<<Add>>]
- /// CHECK-START: int Main.IntAddition1() constant_folding_after_inlining (after)
+ /// CHECK-START: int Main.IntAddition1() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const3:i\d+>> IntConstant 3
/// CHECK-DAG: Return [<<Const3>>]
- /// CHECK-START: int Main.IntAddition1() constant_folding_after_inlining (after)
+ /// CHECK-START: int Main.IntAddition1() constant_folding$after_inlining (after)
/// CHECK-NOT: Add
public static int IntAddition1() {
@@ -234,7 +234,7 @@
return c;
}
- /// CHECK-START: int Main.IntAddition2() constant_folding_after_inlining (before)
+ /// CHECK-START: int Main.IntAddition2() constant_folding$after_inlining (before)
/// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
/// CHECK-DAG: <<Const2:i\d+>> IntConstant 2
/// CHECK-DAG: <<Const5:i\d+>> IntConstant 5
@@ -244,11 +244,11 @@
/// CHECK-DAG: <<Add3:i\d+>> Add [<<Add1>>,<<Add2>>]
/// CHECK-DAG: Return [<<Add3>>]
- /// CHECK-START: int Main.IntAddition2() constant_folding_after_inlining (after)
+ /// CHECK-START: int Main.IntAddition2() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const14:i\d+>> IntConstant 14
/// CHECK-DAG: Return [<<Const14>>]
- /// CHECK-START: int Main.IntAddition2() constant_folding_after_inlining (after)
+ /// CHECK-START: int Main.IntAddition2() constant_folding$after_inlining (after)
/// CHECK-NOT: Add
public static int IntAddition2() {
@@ -263,17 +263,17 @@
return c;
}
- /// CHECK-START: long Main.LongAddition() constant_folding_after_inlining (before)
+ /// CHECK-START: long Main.LongAddition() constant_folding$after_inlining (before)
/// CHECK-DAG: <<Const1:j\d+>> LongConstant 1
/// CHECK-DAG: <<Const2:j\d+>> LongConstant 2
/// CHECK-DAG: <<Add:j\d+>> Add [<<Const1>>,<<Const2>>]
/// CHECK-DAG: Return [<<Add>>]
- /// CHECK-START: long Main.LongAddition() constant_folding_after_inlining (after)
+ /// CHECK-START: long Main.LongAddition() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const3:j\d+>> LongConstant 3
/// CHECK-DAG: Return [<<Const3>>]
- /// CHECK-START: long Main.LongAddition() constant_folding_after_inlining (after)
+ /// CHECK-START: long Main.LongAddition() constant_folding$after_inlining (after)
/// CHECK-NOT: Add
public static long LongAddition() {
@@ -284,17 +284,17 @@
return c;
}
- /// CHECK-START: float Main.FloatAddition() constant_folding_after_inlining (before)
+ /// CHECK-START: float Main.FloatAddition() constant_folding$after_inlining (before)
/// CHECK-DAG: <<Const1:f\d+>> FloatConstant 1
/// CHECK-DAG: <<Const2:f\d+>> FloatConstant 2
/// CHECK-DAG: <<Add:f\d+>> Add [<<Const1>>,<<Const2>>]
/// CHECK-DAG: Return [<<Add>>]
- /// CHECK-START: float Main.FloatAddition() constant_folding_after_inlining (after)
+ /// CHECK-START: float Main.FloatAddition() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const3:f\d+>> FloatConstant 3
/// CHECK-DAG: Return [<<Const3>>]
- /// CHECK-START: float Main.FloatAddition() constant_folding_after_inlining (after)
+ /// CHECK-START: float Main.FloatAddition() constant_folding$after_inlining (after)
/// CHECK-NOT: Add
public static float FloatAddition() {
@@ -305,17 +305,17 @@
return c;
}
- /// CHECK-START: double Main.DoubleAddition() constant_folding_after_inlining (before)
+ /// CHECK-START: double Main.DoubleAddition() constant_folding$after_inlining (before)
/// CHECK-DAG: <<Const1:d\d+>> DoubleConstant 1
/// CHECK-DAG: <<Const2:d\d+>> DoubleConstant 2
/// CHECK-DAG: <<Add:d\d+>> Add [<<Const1>>,<<Const2>>]
/// CHECK-DAG: Return [<<Add>>]
- /// CHECK-START: double Main.DoubleAddition() constant_folding_after_inlining (after)
+ /// CHECK-START: double Main.DoubleAddition() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const3:d\d+>> DoubleConstant 3
/// CHECK-DAG: Return [<<Const3>>]
- /// CHECK-START: double Main.DoubleAddition() constant_folding_after_inlining (after)
+ /// CHECK-START: double Main.DoubleAddition() constant_folding$after_inlining (after)
/// CHECK-NOT: Add
public static double DoubleAddition() {
@@ -331,17 +331,17 @@
* Exercise constant folding on subtraction.
*/
- /// CHECK-START: int Main.IntSubtraction() constant_folding_after_inlining (before)
+ /// CHECK-START: int Main.IntSubtraction() constant_folding$after_inlining (before)
/// CHECK-DAG: <<Const6:i\d+>> IntConstant 6
/// CHECK-DAG: <<Const2:i\d+>> IntConstant 2
/// CHECK-DAG: <<Sub:i\d+>> Sub [<<Const6>>,<<Const2>>]
/// CHECK-DAG: Return [<<Sub>>]
- /// CHECK-START: int Main.IntSubtraction() constant_folding_after_inlining (after)
+ /// CHECK-START: int Main.IntSubtraction() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const4:i\d+>> IntConstant 4
/// CHECK-DAG: Return [<<Const4>>]
- /// CHECK-START: int Main.IntSubtraction() constant_folding_after_inlining (after)
+ /// CHECK-START: int Main.IntSubtraction() constant_folding$after_inlining (after)
/// CHECK-NOT: Sub
public static int IntSubtraction() {
@@ -352,17 +352,17 @@
return c;
}
- /// CHECK-START: long Main.LongSubtraction() constant_folding_after_inlining (before)
+ /// CHECK-START: long Main.LongSubtraction() constant_folding$after_inlining (before)
/// CHECK-DAG: <<Const6:j\d+>> LongConstant 6
/// CHECK-DAG: <<Const2:j\d+>> LongConstant 2
/// CHECK-DAG: <<Sub:j\d+>> Sub [<<Const6>>,<<Const2>>]
/// CHECK-DAG: Return [<<Sub>>]
- /// CHECK-START: long Main.LongSubtraction() constant_folding_after_inlining (after)
+ /// CHECK-START: long Main.LongSubtraction() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const4:j\d+>> LongConstant 4
/// CHECK-DAG: Return [<<Const4>>]
- /// CHECK-START: long Main.LongSubtraction() constant_folding_after_inlining (after)
+ /// CHECK-START: long Main.LongSubtraction() constant_folding$after_inlining (after)
/// CHECK-NOT: Sub
public static long LongSubtraction() {
@@ -373,17 +373,17 @@
return c;
}
- /// CHECK-START: float Main.FloatSubtraction() constant_folding_after_inlining (before)
+ /// CHECK-START: float Main.FloatSubtraction() constant_folding$after_inlining (before)
/// CHECK-DAG: <<Const6:f\d+>> FloatConstant 6
/// CHECK-DAG: <<Const2:f\d+>> FloatConstant 2
/// CHECK-DAG: <<Sub:f\d+>> Sub [<<Const6>>,<<Const2>>]
/// CHECK-DAG: Return [<<Sub>>]
- /// CHECK-START: float Main.FloatSubtraction() constant_folding_after_inlining (after)
+ /// CHECK-START: float Main.FloatSubtraction() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const4:f\d+>> FloatConstant 4
/// CHECK-DAG: Return [<<Const4>>]
- /// CHECK-START: float Main.FloatSubtraction() constant_folding_after_inlining (after)
+ /// CHECK-START: float Main.FloatSubtraction() constant_folding$after_inlining (after)
/// CHECK-NOT: Sub
public static float FloatSubtraction() {
@@ -394,17 +394,17 @@
return c;
}
- /// CHECK-START: double Main.DoubleSubtraction() constant_folding_after_inlining (before)
+ /// CHECK-START: double Main.DoubleSubtraction() constant_folding$after_inlining (before)
/// CHECK-DAG: <<Const6:d\d+>> DoubleConstant 6
/// CHECK-DAG: <<Const2:d\d+>> DoubleConstant 2
/// CHECK-DAG: <<Sub:d\d+>> Sub [<<Const6>>,<<Const2>>]
/// CHECK-DAG: Return [<<Sub>>]
- /// CHECK-START: double Main.DoubleSubtraction() constant_folding_after_inlining (after)
+ /// CHECK-START: double Main.DoubleSubtraction() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const4:d\d+>> DoubleConstant 4
/// CHECK-DAG: Return [<<Const4>>]
- /// CHECK-START: double Main.DoubleSubtraction() constant_folding_after_inlining (after)
+ /// CHECK-START: double Main.DoubleSubtraction() constant_folding$after_inlining (after)
/// CHECK-NOT: Sub
public static double DoubleSubtraction() {
@@ -420,17 +420,17 @@
* Exercise constant folding on multiplication.
*/
- /// CHECK-START: int Main.IntMultiplication() constant_folding_after_inlining (before)
+ /// CHECK-START: int Main.IntMultiplication() constant_folding$after_inlining (before)
/// CHECK-DAG: <<Const7:i\d+>> IntConstant 7
/// CHECK-DAG: <<Const3:i\d+>> IntConstant 3
/// CHECK-DAG: <<Mul:i\d+>> Mul [<<Const7>>,<<Const3>>]
/// CHECK-DAG: Return [<<Mul>>]
- /// CHECK-START: int Main.IntMultiplication() constant_folding_after_inlining (after)
+ /// CHECK-START: int Main.IntMultiplication() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const21:i\d+>> IntConstant 21
/// CHECK-DAG: Return [<<Const21>>]
- /// CHECK-START: int Main.IntMultiplication() constant_folding_after_inlining (after)
+ /// CHECK-START: int Main.IntMultiplication() constant_folding$after_inlining (after)
/// CHECK-NOT: Mul
public static int IntMultiplication() {
@@ -441,17 +441,17 @@
return c;
}
- /// CHECK-START: long Main.LongMultiplication() constant_folding_after_inlining (before)
+ /// CHECK-START: long Main.LongMultiplication() constant_folding$after_inlining (before)
/// CHECK-DAG: <<Const7:j\d+>> LongConstant 7
/// CHECK-DAG: <<Const3:j\d+>> LongConstant 3
/// CHECK-DAG: <<Mul:j\d+>> Mul [<<Const7>>,<<Const3>>]
/// CHECK-DAG: Return [<<Mul>>]
- /// CHECK-START: long Main.LongMultiplication() constant_folding_after_inlining (after)
+ /// CHECK-START: long Main.LongMultiplication() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const21:j\d+>> LongConstant 21
/// CHECK-DAG: Return [<<Const21>>]
- /// CHECK-START: long Main.LongMultiplication() constant_folding_after_inlining (after)
+ /// CHECK-START: long Main.LongMultiplication() constant_folding$after_inlining (after)
/// CHECK-NOT: Mul
public static long LongMultiplication() {
@@ -462,17 +462,17 @@
return c;
}
- /// CHECK-START: float Main.FloatMultiplication() constant_folding_after_inlining (before)
+ /// CHECK-START: float Main.FloatMultiplication() constant_folding$after_inlining (before)
/// CHECK-DAG: <<Const7:f\d+>> FloatConstant 7
/// CHECK-DAG: <<Const3:f\d+>> FloatConstant 3
/// CHECK-DAG: <<Mul:f\d+>> Mul [<<Const7>>,<<Const3>>]
/// CHECK-DAG: Return [<<Mul>>]
- /// CHECK-START: float Main.FloatMultiplication() constant_folding_after_inlining (after)
+ /// CHECK-START: float Main.FloatMultiplication() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const21:f\d+>> FloatConstant 21
/// CHECK-DAG: Return [<<Const21>>]
- /// CHECK-START: float Main.FloatMultiplication() constant_folding_after_inlining (after)
+ /// CHECK-START: float Main.FloatMultiplication() constant_folding$after_inlining (after)
/// CHECK-NOT: Mul
public static float FloatMultiplication() {
@@ -483,17 +483,17 @@
return c;
}
- /// CHECK-START: double Main.DoubleMultiplication() constant_folding_after_inlining (before)
+ /// CHECK-START: double Main.DoubleMultiplication() constant_folding$after_inlining (before)
/// CHECK-DAG: <<Const7:d\d+>> DoubleConstant 7
/// CHECK-DAG: <<Const3:d\d+>> DoubleConstant 3
/// CHECK-DAG: <<Mul:d\d+>> Mul [<<Const7>>,<<Const3>>]
/// CHECK-DAG: Return [<<Mul>>]
- /// CHECK-START: double Main.DoubleMultiplication() constant_folding_after_inlining (after)
+ /// CHECK-START: double Main.DoubleMultiplication() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const21:d\d+>> DoubleConstant 21
/// CHECK-DAG: Return [<<Const21>>]
- /// CHECK-START: double Main.DoubleMultiplication() constant_folding_after_inlining (after)
+ /// CHECK-START: double Main.DoubleMultiplication() constant_folding$after_inlining (after)
/// CHECK-NOT: Mul
public static double DoubleMultiplication() {
@@ -509,18 +509,18 @@
* Exercise constant folding on division.
*/
- /// CHECK-START: int Main.IntDivision() constant_folding_after_inlining (before)
+ /// CHECK-START: int Main.IntDivision() constant_folding$after_inlining (before)
/// CHECK-DAG: <<Const8:i\d+>> IntConstant 8
/// CHECK-DAG: <<Const3:i\d+>> IntConstant 3
/// CHECK-DAG: <<Div0Chk:i\d+>> DivZeroCheck [<<Const3>>]
/// CHECK-DAG: <<Div:i\d+>> Div [<<Const8>>,<<Div0Chk>>]
/// CHECK-DAG: Return [<<Div>>]
- /// CHECK-START: int Main.IntDivision() constant_folding_after_inlining (after)
+ /// CHECK-START: int Main.IntDivision() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const2:i\d+>> IntConstant 2
/// CHECK-DAG: Return [<<Const2>>]
- /// CHECK-START: int Main.IntDivision() constant_folding_after_inlining (after)
+ /// CHECK-START: int Main.IntDivision() constant_folding$after_inlining (after)
/// CHECK-NOT: DivZeroCheck
/// CHECK-NOT: Div
@@ -532,18 +532,18 @@
return c;
}
- /// CHECK-START: long Main.LongDivision() constant_folding_after_inlining (before)
+ /// CHECK-START: long Main.LongDivision() constant_folding$after_inlining (before)
/// CHECK-DAG: <<Const8:j\d+>> LongConstant 8
/// CHECK-DAG: <<Const3:j\d+>> LongConstant 3
/// CHECK-DAG: <<Div0Chk:j\d+>> DivZeroCheck [<<Const3>>]
/// CHECK-DAG: <<Div:j\d+>> Div [<<Const8>>,<<Div0Chk>>]
/// CHECK-DAG: Return [<<Div>>]
- /// CHECK-START: long Main.LongDivision() constant_folding_after_inlining (after)
+ /// CHECK-START: long Main.LongDivision() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const2:j\d+>> LongConstant 2
/// CHECK-DAG: Return [<<Const2>>]
- /// CHECK-START: long Main.LongDivision() constant_folding_after_inlining (after)
+ /// CHECK-START: long Main.LongDivision() constant_folding$after_inlining (after)
/// CHECK-NOT: DivZeroCheck
/// CHECK-NOT: Div
@@ -555,17 +555,17 @@
return c;
}
- /// CHECK-START: float Main.FloatDivision() constant_folding_after_inlining (before)
+ /// CHECK-START: float Main.FloatDivision() constant_folding$after_inlining (before)
/// CHECK-DAG: <<Const8:f\d+>> FloatConstant 8
/// CHECK-DAG: <<Const2P5:f\d+>> FloatConstant 2.5
/// CHECK-DAG: <<Div:f\d+>> Div [<<Const8>>,<<Const2P5>>]
/// CHECK-DAG: Return [<<Div>>]
- /// CHECK-START: float Main.FloatDivision() constant_folding_after_inlining (after)
+ /// CHECK-START: float Main.FloatDivision() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const3P2:f\d+>> FloatConstant 3.2
/// CHECK-DAG: Return [<<Const3P2>>]
- /// CHECK-START: float Main.FloatDivision() constant_folding_after_inlining (after)
+ /// CHECK-START: float Main.FloatDivision() constant_folding$after_inlining (after)
/// CHECK-NOT: Div
public static float FloatDivision() {
@@ -576,17 +576,17 @@
return c;
}
- /// CHECK-START: double Main.DoubleDivision() constant_folding_after_inlining (before)
+ /// CHECK-START: double Main.DoubleDivision() constant_folding$after_inlining (before)
/// CHECK-DAG: <<Const8:d\d+>> DoubleConstant 8
/// CHECK-DAG: <<Const2P5:d\d+>> DoubleConstant 2.5
/// CHECK-DAG: <<Div:d\d+>> Div [<<Const8>>,<<Const2P5>>]
/// CHECK-DAG: Return [<<Div>>]
- /// CHECK-START: double Main.DoubleDivision() constant_folding_after_inlining (after)
+ /// CHECK-START: double Main.DoubleDivision() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const3P2:d\d+>> DoubleConstant 3.2
/// CHECK-DAG: Return [<<Const3P2>>]
- /// CHECK-START: double Main.DoubleDivision() constant_folding_after_inlining (after)
+ /// CHECK-START: double Main.DoubleDivision() constant_folding$after_inlining (after)
/// CHECK-NOT: Div
public static double DoubleDivision() {
@@ -602,18 +602,18 @@
* Exercise constant folding on remainder.
*/
- /// CHECK-START: int Main.IntRemainder() constant_folding_after_inlining (before)
+ /// CHECK-START: int Main.IntRemainder() constant_folding$after_inlining (before)
/// CHECK-DAG: <<Const8:i\d+>> IntConstant 8
/// CHECK-DAG: <<Const3:i\d+>> IntConstant 3
/// CHECK-DAG: <<Div0Chk:i\d+>> DivZeroCheck [<<Const3>>]
/// CHECK-DAG: <<Rem:i\d+>> Rem [<<Const8>>,<<Div0Chk>>]
/// CHECK-DAG: Return [<<Rem>>]
- /// CHECK-START: int Main.IntRemainder() constant_folding_after_inlining (after)
+ /// CHECK-START: int Main.IntRemainder() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const2:i\d+>> IntConstant 2
/// CHECK-DAG: Return [<<Const2>>]
- /// CHECK-START: int Main.IntRemainder() constant_folding_after_inlining (after)
+ /// CHECK-START: int Main.IntRemainder() constant_folding$after_inlining (after)
/// CHECK-NOT: DivZeroCheck
/// CHECK-NOT: Rem
@@ -625,18 +625,18 @@
return c;
}
- /// CHECK-START: long Main.LongRemainder() constant_folding_after_inlining (before)
+ /// CHECK-START: long Main.LongRemainder() constant_folding$after_inlining (before)
/// CHECK-DAG: <<Const8:j\d+>> LongConstant 8
/// CHECK-DAG: <<Const3:j\d+>> LongConstant 3
/// CHECK-DAG: <<Div0Chk:j\d+>> DivZeroCheck [<<Const3>>]
/// CHECK-DAG: <<Rem:j\d+>> Rem [<<Const8>>,<<Div0Chk>>]
/// CHECK-DAG: Return [<<Rem>>]
- /// CHECK-START: long Main.LongRemainder() constant_folding_after_inlining (after)
+ /// CHECK-START: long Main.LongRemainder() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const2:j\d+>> LongConstant 2
/// CHECK-DAG: Return [<<Const2>>]
- /// CHECK-START: long Main.LongRemainder() constant_folding_after_inlining (after)
+ /// CHECK-START: long Main.LongRemainder() constant_folding$after_inlining (after)
/// CHECK-NOT: DivZeroCheck
/// CHECK-NOT: Rem
@@ -648,17 +648,17 @@
return c;
}
- /// CHECK-START: float Main.FloatRemainder() constant_folding_after_inlining (before)
+ /// CHECK-START: float Main.FloatRemainder() constant_folding$after_inlining (before)
/// CHECK-DAG: <<Const8:f\d+>> FloatConstant 8
/// CHECK-DAG: <<Const2P5:f\d+>> FloatConstant 2.5
/// CHECK-DAG: <<Rem:f\d+>> Rem [<<Const8>>,<<Const2P5>>]
/// CHECK-DAG: Return [<<Rem>>]
- /// CHECK-START: float Main.FloatRemainder() constant_folding_after_inlining (after)
+ /// CHECK-START: float Main.FloatRemainder() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const0P5:f\d+>> FloatConstant 0.5
/// CHECK-DAG: Return [<<Const0P5>>]
- /// CHECK-START: float Main.FloatRemainder() constant_folding_after_inlining (after)
+ /// CHECK-START: float Main.FloatRemainder() constant_folding$after_inlining (after)
/// CHECK-NOT: Rem
public static float FloatRemainder() {
@@ -669,17 +669,17 @@
return c;
}
- /// CHECK-START: double Main.DoubleRemainder() constant_folding_after_inlining (before)
+ /// CHECK-START: double Main.DoubleRemainder() constant_folding$after_inlining (before)
/// CHECK-DAG: <<Const8:d\d+>> DoubleConstant 8
/// CHECK-DAG: <<Const2P5:d\d+>> DoubleConstant 2.5
/// CHECK-DAG: <<Rem:d\d+>> Rem [<<Const8>>,<<Const2P5>>]
/// CHECK-DAG: Return [<<Rem>>]
- /// CHECK-START: double Main.DoubleRemainder() constant_folding_after_inlining (after)
+ /// CHECK-START: double Main.DoubleRemainder() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const0P5:d\d+>> DoubleConstant 0.5
/// CHECK-DAG: Return [<<Const0P5>>]
- /// CHECK-START: double Main.DoubleRemainder() constant_folding_after_inlining (after)
+ /// CHECK-START: double Main.DoubleRemainder() constant_folding$after_inlining (after)
/// CHECK-NOT: Rem
public static double DoubleRemainder() {
@@ -695,18 +695,18 @@
* Exercise constant folding on left shift.
*/
- /// CHECK-START: int Main.ShlIntLong() constant_folding_after_inlining (before)
+ /// CHECK-START: int Main.ShlIntLong() constant_folding$after_inlining (before)
/// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
/// CHECK-DAG: <<Const2L:j\d+>> LongConstant 2
/// CHECK-DAG: <<TypeConv:i\d+>> TypeConversion [<<Const2L>>]
/// CHECK-DAG: <<Shl:i\d+>> Shl [<<Const1>>,<<TypeConv>>]
/// CHECK-DAG: Return [<<Shl>>]
- /// CHECK-START: int Main.ShlIntLong() constant_folding_after_inlining (after)
+ /// CHECK-START: int Main.ShlIntLong() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const4:i\d+>> IntConstant 4
/// CHECK-DAG: Return [<<Const4>>]
- /// CHECK-START: int Main.ShlIntLong() constant_folding_after_inlining (after)
+ /// CHECK-START: int Main.ShlIntLong() constant_folding$after_inlining (after)
/// CHECK-NOT: Shl
public static int ShlIntLong() {
@@ -715,17 +715,17 @@
return lhs << rhs;
}
- /// CHECK-START: long Main.ShlLongInt() constant_folding_after_inlining (before)
+ /// CHECK-START: long Main.ShlLongInt() constant_folding$after_inlining (before)
/// CHECK-DAG: <<Const3L:j\d+>> LongConstant 3
/// CHECK-DAG: <<Const2:i\d+>> IntConstant 2
/// CHECK-DAG: <<Shl:j\d+>> Shl [<<Const3L>>,<<Const2>>]
/// CHECK-DAG: Return [<<Shl>>]
- /// CHECK-START: long Main.ShlLongInt() constant_folding_after_inlining (after)
+ /// CHECK-START: long Main.ShlLongInt() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const12L:j\d+>> LongConstant 12
/// CHECK-DAG: Return [<<Const12L>>]
- /// CHECK-START: long Main.ShlLongInt() constant_folding_after_inlining (after)
+ /// CHECK-START: long Main.ShlLongInt() constant_folding$after_inlining (after)
/// CHECK-NOT: Shl
public static long ShlLongInt() {
@@ -739,18 +739,18 @@
* Exercise constant folding on right shift.
*/
- /// CHECK-START: int Main.ShrIntLong() constant_folding_after_inlining (before)
+ /// CHECK-START: int Main.ShrIntLong() constant_folding$after_inlining (before)
/// CHECK-DAG: <<Const7:i\d+>> IntConstant 7
/// CHECK-DAG: <<Const2L:j\d+>> LongConstant 2
/// CHECK-DAG: <<TypeConv:i\d+>> TypeConversion [<<Const2L>>]
/// CHECK-DAG: <<Shr:i\d+>> Shr [<<Const7>>,<<TypeConv>>]
/// CHECK-DAG: Return [<<Shr>>]
- /// CHECK-START: int Main.ShrIntLong() constant_folding_after_inlining (after)
+ /// CHECK-START: int Main.ShrIntLong() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
/// CHECK-DAG: Return [<<Const1>>]
- /// CHECK-START: int Main.ShrIntLong() constant_folding_after_inlining (after)
+ /// CHECK-START: int Main.ShrIntLong() constant_folding$after_inlining (after)
/// CHECK-NOT: Shr
public static int ShrIntLong() {
@@ -759,17 +759,17 @@
return lhs >> rhs;
}
- /// CHECK-START: long Main.ShrLongInt() constant_folding_after_inlining (before)
+ /// CHECK-START: long Main.ShrLongInt() constant_folding$after_inlining (before)
/// CHECK-DAG: <<Const9L:j\d+>> LongConstant 9
/// CHECK-DAG: <<Const2:i\d+>> IntConstant 2
/// CHECK-DAG: <<Shr:j\d+>> Shr [<<Const9L>>,<<Const2>>]
/// CHECK-DAG: Return [<<Shr>>]
- /// CHECK-START: long Main.ShrLongInt() constant_folding_after_inlining (after)
+ /// CHECK-START: long Main.ShrLongInt() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const2L:j\d+>> LongConstant 2
/// CHECK-DAG: Return [<<Const2L>>]
- /// CHECK-START: long Main.ShrLongInt() constant_folding_after_inlining (after)
+ /// CHECK-START: long Main.ShrLongInt() constant_folding$after_inlining (after)
/// CHECK-NOT: Shr
public static long ShrLongInt() {
@@ -783,18 +783,18 @@
* Exercise constant folding on unsigned right shift.
*/
- /// CHECK-START: int Main.UShrIntLong() constant_folding_after_inlining (before)
+ /// CHECK-START: int Main.UShrIntLong() constant_folding$after_inlining (before)
/// CHECK-DAG: <<ConstM7:i\d+>> IntConstant -7
/// CHECK-DAG: <<Const2L:j\d+>> LongConstant 2
/// CHECK-DAG: <<TypeConv:i\d+>> TypeConversion [<<Const2L>>]
/// CHECK-DAG: <<UShr:i\d+>> UShr [<<ConstM7>>,<<TypeConv>>]
/// CHECK-DAG: Return [<<UShr>>]
- /// CHECK-START: int Main.UShrIntLong() constant_folding_after_inlining (after)
+ /// CHECK-START: int Main.UShrIntLong() constant_folding$after_inlining (after)
/// CHECK-DAG: <<ConstRes:i\d+>> IntConstant 1073741822
/// CHECK-DAG: Return [<<ConstRes>>]
- /// CHECK-START: int Main.UShrIntLong() constant_folding_after_inlining (after)
+ /// CHECK-START: int Main.UShrIntLong() constant_folding$after_inlining (after)
/// CHECK-NOT: UShr
public static int UShrIntLong() {
@@ -803,17 +803,17 @@
return lhs >>> rhs;
}
- /// CHECK-START: long Main.UShrLongInt() constant_folding_after_inlining (before)
+ /// CHECK-START: long Main.UShrLongInt() constant_folding$after_inlining (before)
/// CHECK-DAG: <<ConstM9L:j\d+>> LongConstant -9
/// CHECK-DAG: <<Const2:i\d+>> IntConstant 2
/// CHECK-DAG: <<UShr:j\d+>> UShr [<<ConstM9L>>,<<Const2>>]
/// CHECK-DAG: Return [<<UShr>>]
- /// CHECK-START: long Main.UShrLongInt() constant_folding_after_inlining (after)
+ /// CHECK-START: long Main.UShrLongInt() constant_folding$after_inlining (after)
/// CHECK-DAG: <<ConstRes:j\d+>> LongConstant 4611686018427387901
/// CHECK-DAG: Return [<<ConstRes>>]
- /// CHECK-START: long Main.UShrLongInt() constant_folding_after_inlining (after)
+ /// CHECK-START: long Main.UShrLongInt() constant_folding$after_inlining (after)
/// CHECK-NOT: UShr
public static long UShrLongInt() {
@@ -827,18 +827,18 @@
* Exercise constant folding on logical and.
*/
- /// CHECK-START: long Main.AndIntLong() constant_folding_after_inlining (before)
+ /// CHECK-START: long Main.AndIntLong() constant_folding$after_inlining (before)
/// CHECK-DAG: <<Const10:i\d+>> IntConstant 10
/// CHECK-DAG: <<Const3L:j\d+>> LongConstant 3
/// CHECK-DAG: <<TypeConv:j\d+>> TypeConversion [<<Const10>>]
/// CHECK-DAG: <<And:j\d+>> And [<<TypeConv>>,<<Const3L>>]
/// CHECK-DAG: Return [<<And>>]
- /// CHECK-START: long Main.AndIntLong() constant_folding_after_inlining (after)
+ /// CHECK-START: long Main.AndIntLong() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const2:j\d+>> LongConstant 2
/// CHECK-DAG: Return [<<Const2>>]
- /// CHECK-START: long Main.AndIntLong() constant_folding_after_inlining (after)
+ /// CHECK-START: long Main.AndIntLong() constant_folding$after_inlining (after)
/// CHECK-NOT: And
public static long AndIntLong() {
@@ -847,18 +847,18 @@
return lhs & rhs;
}
- /// CHECK-START: long Main.AndLongInt() constant_folding_after_inlining (before)
+ /// CHECK-START: long Main.AndLongInt() constant_folding$after_inlining (before)
/// CHECK-DAG: <<Const10L:j\d+>> LongConstant 10
/// CHECK-DAG: <<Const3:i\d+>> IntConstant 3
/// CHECK-DAG: <<TypeConv:j\d+>> TypeConversion [<<Const3>>]
/// CHECK-DAG: <<And:j\d+>> And [<<TypeConv>>,<<Const10L>>]
/// CHECK-DAG: Return [<<And>>]
- /// CHECK-START: long Main.AndLongInt() constant_folding_after_inlining (after)
+ /// CHECK-START: long Main.AndLongInt() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const2:j\d+>> LongConstant 2
/// CHECK-DAG: Return [<<Const2>>]
- /// CHECK-START: long Main.AndLongInt() constant_folding_after_inlining (after)
+ /// CHECK-START: long Main.AndLongInt() constant_folding$after_inlining (after)
/// CHECK-NOT: And
public static long AndLongInt() {
@@ -872,18 +872,18 @@
* Exercise constant folding on logical or.
*/
- /// CHECK-START: long Main.OrIntLong() constant_folding_after_inlining (before)
+ /// CHECK-START: long Main.OrIntLong() constant_folding$after_inlining (before)
/// CHECK-DAG: <<Const10:i\d+>> IntConstant 10
/// CHECK-DAG: <<Const3L:j\d+>> LongConstant 3
/// CHECK-DAG: <<TypeConv:j\d+>> TypeConversion [<<Const10>>]
/// CHECK-DAG: <<Or:j\d+>> Or [<<TypeConv>>,<<Const3L>>]
/// CHECK-DAG: Return [<<Or>>]
- /// CHECK-START: long Main.OrIntLong() constant_folding_after_inlining (after)
+ /// CHECK-START: long Main.OrIntLong() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const11:j\d+>> LongConstant 11
/// CHECK-DAG: Return [<<Const11>>]
- /// CHECK-START: long Main.OrIntLong() constant_folding_after_inlining (after)
+ /// CHECK-START: long Main.OrIntLong() constant_folding$after_inlining (after)
/// CHECK-NOT: Or
public static long OrIntLong() {
@@ -892,18 +892,18 @@
return lhs | rhs;
}
- /// CHECK-START: long Main.OrLongInt() constant_folding_after_inlining (before)
+ /// CHECK-START: long Main.OrLongInt() constant_folding$after_inlining (before)
/// CHECK-DAG: <<Const10L:j\d+>> LongConstant 10
/// CHECK-DAG: <<Const3:i\d+>> IntConstant 3
/// CHECK-DAG: <<TypeConv:j\d+>> TypeConversion [<<Const3>>]
/// CHECK-DAG: <<Or:j\d+>> Or [<<TypeConv>>,<<Const10L>>]
/// CHECK-DAG: Return [<<Or>>]
- /// CHECK-START: long Main.OrLongInt() constant_folding_after_inlining (after)
+ /// CHECK-START: long Main.OrLongInt() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const11:j\d+>> LongConstant 11
/// CHECK-DAG: Return [<<Const11>>]
- /// CHECK-START: long Main.OrLongInt() constant_folding_after_inlining (after)
+ /// CHECK-START: long Main.OrLongInt() constant_folding$after_inlining (after)
/// CHECK-NOT: Or
public static long OrLongInt() {
@@ -917,18 +917,18 @@
* Exercise constant folding on logical exclusive or.
*/
- /// CHECK-START: long Main.XorIntLong() constant_folding_after_inlining (before)
+ /// CHECK-START: long Main.XorIntLong() constant_folding$after_inlining (before)
/// CHECK-DAG: <<Const10:i\d+>> IntConstant 10
/// CHECK-DAG: <<Const3L:j\d+>> LongConstant 3
/// CHECK-DAG: <<TypeConv:j\d+>> TypeConversion [<<Const10>>]
/// CHECK-DAG: <<Xor:j\d+>> Xor [<<TypeConv>>,<<Const3L>>]
/// CHECK-DAG: Return [<<Xor>>]
- /// CHECK-START: long Main.XorIntLong() constant_folding_after_inlining (after)
+ /// CHECK-START: long Main.XorIntLong() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const9:j\d+>> LongConstant 9
/// CHECK-DAG: Return [<<Const9>>]
- /// CHECK-START: long Main.XorIntLong() constant_folding_after_inlining (after)
+ /// CHECK-START: long Main.XorIntLong() constant_folding$after_inlining (after)
/// CHECK-NOT: Xor
public static long XorIntLong() {
@@ -937,18 +937,18 @@
return lhs ^ rhs;
}
- /// CHECK-START: long Main.XorLongInt() constant_folding_after_inlining (before)
+ /// CHECK-START: long Main.XorLongInt() constant_folding$after_inlining (before)
/// CHECK-DAG: <<Const10L:j\d+>> LongConstant 10
/// CHECK-DAG: <<Const3:i\d+>> IntConstant 3
/// CHECK-DAG: <<TypeConv:j\d+>> TypeConversion [<<Const3>>]
/// CHECK-DAG: <<Xor:j\d+>> Xor [<<TypeConv>>,<<Const10L>>]
/// CHECK-DAG: Return [<<Xor>>]
- /// CHECK-START: long Main.XorLongInt() constant_folding_after_inlining (after)
+ /// CHECK-START: long Main.XorLongInt() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const9:j\d+>> LongConstant 9
/// CHECK-DAG: Return [<<Const9>>]
- /// CHECK-START: long Main.XorLongInt() constant_folding_after_inlining (after)
+ /// CHECK-START: long Main.XorLongInt() constant_folding$after_inlining (after)
/// CHECK-NOT: Xor
public static long XorLongInt() {
@@ -962,17 +962,17 @@
* Exercise constant folding on constant (static) condition.
*/
- /// CHECK-START: int Main.StaticCondition() constant_folding_after_inlining (before)
+ /// CHECK-START: int Main.StaticCondition() constant_folding$after_inlining (before)
/// CHECK-DAG: <<Const7:i\d+>> IntConstant 7
/// CHECK-DAG: <<Const2:i\d+>> IntConstant 2
/// CHECK-DAG: <<Cond:z\d+>> GreaterThanOrEqual [<<Const7>>,<<Const2>>]
/// CHECK-DAG: Select [{{i\d+}},{{i\d+}},<<Cond>>]
- /// CHECK-START: int Main.StaticCondition() constant_folding_after_inlining (after)
+ /// CHECK-START: int Main.StaticCondition() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
/// CHECK-DAG: Select [{{i\d+}},{{i\d+}},<<Const1>>]
- /// CHECK-START: int Main.StaticCondition() constant_folding_after_inlining (after)
+ /// CHECK-START: int Main.StaticCondition() constant_folding$after_inlining (after)
/// CHECK-NOT: GreaterThanOrEqual
public static int StaticCondition() {
@@ -991,16 +991,16 @@
* Exercise constant folding on constant (static) condition for null references.
*/
- /// CHECK-START: int Main.StaticConditionNulls() constant_folding_after_inlining (before)
+ /// CHECK-START: int Main.StaticConditionNulls() constant_folding$after_inlining (before)
/// CHECK-DAG: <<Null:l\d+>> NullConstant
/// CHECK-DAG: <<Cond:z\d+>> NotEqual [<<Null>>,<<Null>>]
/// CHECK-DAG: Select [{{i\d+}},{{i\d+}},<<Cond>>]
- /// CHECK-START: int Main.StaticConditionNulls() constant_folding_after_inlining (after)
+ /// CHECK-START: int Main.StaticConditionNulls() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
/// CHECK-DAG: Select [{{i\d+}},{{i\d+}},<<Const0>>]
- /// CHECK-START: int Main.StaticConditionNulls() constant_folding_after_inlining (after)
+ /// CHECK-START: int Main.StaticConditionNulls() constant_folding$after_inlining (after)
/// CHECK-NOT: NotEqual
private static Object getNull() {
@@ -1023,7 +1023,7 @@
* (forward) post-order traversal of the dominator tree.
*/
- /// CHECK-START: int Main.JumpsAndConditionals(boolean) constant_folding_after_inlining (before)
+ /// CHECK-START: int Main.JumpsAndConditionals(boolean) constant_folding$after_inlining (before)
/// CHECK-DAG: <<Cond:z\d+>> ParameterValue
/// CHECK-DAG: <<Const2:i\d+>> IntConstant 2
/// CHECK-DAG: <<Const5:i\d+>> IntConstant 5
@@ -1032,14 +1032,14 @@
/// CHECK-DAG: <<Phi:i\d+>> Select [<<Sub>>,<<Add>>,<<Cond>>]
/// CHECK-DAG: Return [<<Phi>>]
- /// CHECK-START: int Main.JumpsAndConditionals(boolean) constant_folding_after_inlining (after)
+ /// CHECK-START: int Main.JumpsAndConditionals(boolean) constant_folding$after_inlining (after)
/// CHECK-DAG: <<Cond:z\d+>> ParameterValue
/// CHECK-DAG: <<Const3:i\d+>> IntConstant 3
/// CHECK-DAG: <<Const7:i\d+>> IntConstant 7
/// CHECK-DAG: <<Phi:i\d+>> Select [<<Const3>>,<<Const7>>,<<Cond>>]
/// CHECK-DAG: Return [<<Phi>>]
- /// CHECK-START: int Main.JumpsAndConditionals(boolean) constant_folding_after_inlining (after)
+ /// CHECK-START: int Main.JumpsAndConditionals(boolean) constant_folding$after_inlining (after)
/// CHECK-NOT: Add
/// CHECK-NOT: Sub
@@ -1325,16 +1325,16 @@
* Exercise constant folding on type conversions.
*/
- /// CHECK-START: int Main.ReturnInt33() constant_folding_after_inlining (before)
+ /// CHECK-START: int Main.ReturnInt33() constant_folding$after_inlining (before)
/// CHECK-DAG: <<Const33:j\d+>> LongConstant 33
/// CHECK-DAG: <<Convert:i\d+>> TypeConversion [<<Const33>>]
/// CHECK-DAG: Return [<<Convert>>]
- /// CHECK-START: int Main.ReturnInt33() constant_folding_after_inlining (after)
+ /// CHECK-START: int Main.ReturnInt33() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const33:i\d+>> IntConstant 33
/// CHECK-DAG: Return [<<Const33>>]
- /// CHECK-START: int Main.ReturnInt33() constant_folding_after_inlining (after)
+ /// CHECK-START: int Main.ReturnInt33() constant_folding$after_inlining (after)
/// CHECK-NOT: TypeConversion
public static int ReturnInt33() {
@@ -1342,16 +1342,16 @@
return (int) imm;
}
- /// CHECK-START: int Main.ReturnIntMax() constant_folding_after_inlining (before)
+ /// CHECK-START: int Main.ReturnIntMax() constant_folding$after_inlining (before)
/// CHECK-DAG: <<ConstMax:f\d+>> FloatConstant 1e+34
/// CHECK-DAG: <<Convert:i\d+>> TypeConversion [<<ConstMax>>]
/// CHECK-DAG: Return [<<Convert>>]
- /// CHECK-START: int Main.ReturnIntMax() constant_folding_after_inlining (after)
+ /// CHECK-START: int Main.ReturnIntMax() constant_folding$after_inlining (after)
/// CHECK-DAG: <<ConstMax:i\d+>> IntConstant 2147483647
/// CHECK-DAG: Return [<<ConstMax>>]
- /// CHECK-START: int Main.ReturnIntMax() constant_folding_after_inlining (after)
+ /// CHECK-START: int Main.ReturnIntMax() constant_folding$after_inlining (after)
/// CHECK-NOT: TypeConversion
public static int ReturnIntMax() {
@@ -1359,16 +1359,16 @@
return (int) imm;
}
- /// CHECK-START: int Main.ReturnInt0() constant_folding_after_inlining (before)
+ /// CHECK-START: int Main.ReturnInt0() constant_folding$after_inlining (before)
/// CHECK-DAG: <<ConstNaN:d\d+>> DoubleConstant nan
/// CHECK-DAG: <<Convert:i\d+>> TypeConversion [<<ConstNaN>>]
/// CHECK-DAG: Return [<<Convert>>]
- /// CHECK-START: int Main.ReturnInt0() constant_folding_after_inlining (after)
+ /// CHECK-START: int Main.ReturnInt0() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
/// CHECK-DAG: Return [<<Const0>>]
- /// CHECK-START: int Main.ReturnInt0() constant_folding_after_inlining (after)
+ /// CHECK-START: int Main.ReturnInt0() constant_folding$after_inlining (after)
/// CHECK-NOT: TypeConversion
public static int ReturnInt0() {
@@ -1376,16 +1376,16 @@
return (int) imm;
}
- /// CHECK-START: long Main.ReturnLong33() constant_folding_after_inlining (before)
+ /// CHECK-START: long Main.ReturnLong33() constant_folding$after_inlining (before)
/// CHECK-DAG: <<Const33:i\d+>> IntConstant 33
/// CHECK-DAG: <<Convert:j\d+>> TypeConversion [<<Const33>>]
/// CHECK-DAG: Return [<<Convert>>]
- /// CHECK-START: long Main.ReturnLong33() constant_folding_after_inlining (after)
+ /// CHECK-START: long Main.ReturnLong33() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const33:j\d+>> LongConstant 33
/// CHECK-DAG: Return [<<Const33>>]
- /// CHECK-START: long Main.ReturnLong33() constant_folding_after_inlining (after)
+ /// CHECK-START: long Main.ReturnLong33() constant_folding$after_inlining (after)
/// CHECK-NOT: TypeConversion
public static long ReturnLong33() {
@@ -1393,16 +1393,16 @@
return (long) imm;
}
- /// CHECK-START: long Main.ReturnLong34() constant_folding_after_inlining (before)
+ /// CHECK-START: long Main.ReturnLong34() constant_folding$after_inlining (before)
/// CHECK-DAG: <<Const34:f\d+>> FloatConstant 34
/// CHECK-DAG: <<Convert:j\d+>> TypeConversion [<<Const34>>]
/// CHECK-DAG: Return [<<Convert>>]
- /// CHECK-START: long Main.ReturnLong34() constant_folding_after_inlining (after)
+ /// CHECK-START: long Main.ReturnLong34() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const34:j\d+>> LongConstant 34
/// CHECK-DAG: Return [<<Const34>>]
- /// CHECK-START: long Main.ReturnLong34() constant_folding_after_inlining (after)
+ /// CHECK-START: long Main.ReturnLong34() constant_folding$after_inlining (after)
/// CHECK-NOT: TypeConversion
public static long ReturnLong34() {
@@ -1410,16 +1410,16 @@
return (long) imm;
}
- /// CHECK-START: long Main.ReturnLong0() constant_folding_after_inlining (before)
+ /// CHECK-START: long Main.ReturnLong0() constant_folding$after_inlining (before)
/// CHECK-DAG: <<ConstNaN:d\d+>> DoubleConstant nan
/// CHECK-DAG: <<Convert:j\d+>> TypeConversion [<<ConstNaN>>]
/// CHECK-DAG: Return [<<Convert>>]
- /// CHECK-START: long Main.ReturnLong0() constant_folding_after_inlining (after)
+ /// CHECK-START: long Main.ReturnLong0() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const0:j\d+>> LongConstant 0
/// CHECK-DAG: Return [<<Const0>>]
- /// CHECK-START: long Main.ReturnLong0() constant_folding_after_inlining (after)
+ /// CHECK-START: long Main.ReturnLong0() constant_folding$after_inlining (after)
/// CHECK-NOT: TypeConversion
public static long ReturnLong0() {
@@ -1427,16 +1427,16 @@
return (long) imm;
}
- /// CHECK-START: float Main.ReturnFloat33() constant_folding_after_inlining (before)
+ /// CHECK-START: float Main.ReturnFloat33() constant_folding$after_inlining (before)
/// CHECK-DAG: <<Const33:i\d+>> IntConstant 33
/// CHECK-DAG: <<Convert:f\d+>> TypeConversion [<<Const33>>]
/// CHECK-DAG: Return [<<Convert>>]
- /// CHECK-START: float Main.ReturnFloat33() constant_folding_after_inlining (after)
+ /// CHECK-START: float Main.ReturnFloat33() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const33:f\d+>> FloatConstant 33
/// CHECK-DAG: Return [<<Const33>>]
- /// CHECK-START: float Main.ReturnFloat33() constant_folding_after_inlining (after)
+ /// CHECK-START: float Main.ReturnFloat33() constant_folding$after_inlining (after)
/// CHECK-NOT: TypeConversion
public static float ReturnFloat33() {
@@ -1444,16 +1444,16 @@
return (float) imm;
}
- /// CHECK-START: float Main.ReturnFloat34() constant_folding_after_inlining (before)
+ /// CHECK-START: float Main.ReturnFloat34() constant_folding$after_inlining (before)
/// CHECK-DAG: <<Const34:j\d+>> LongConstant 34
/// CHECK-DAG: <<Convert:f\d+>> TypeConversion [<<Const34>>]
/// CHECK-DAG: Return [<<Convert>>]
- /// CHECK-START: float Main.ReturnFloat34() constant_folding_after_inlining (after)
+ /// CHECK-START: float Main.ReturnFloat34() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const34:f\d+>> FloatConstant 34
/// CHECK-DAG: Return [<<Const34>>]
- /// CHECK-START: float Main.ReturnFloat34() constant_folding_after_inlining (after)
+ /// CHECK-START: float Main.ReturnFloat34() constant_folding$after_inlining (after)
/// CHECK-NOT: TypeConversion
public static float ReturnFloat34() {
@@ -1461,16 +1461,16 @@
return (float) imm;
}
- /// CHECK-START: float Main.ReturnFloat99P25() constant_folding_after_inlining (before)
+ /// CHECK-START: float Main.ReturnFloat99P25() constant_folding$after_inlining (before)
/// CHECK-DAG: <<Const:d\d+>> DoubleConstant 99.25
/// CHECK-DAG: <<Convert:f\d+>> TypeConversion [<<Const>>]
/// CHECK-DAG: Return [<<Convert>>]
- /// CHECK-START: float Main.ReturnFloat99P25() constant_folding_after_inlining (after)
+ /// CHECK-START: float Main.ReturnFloat99P25() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const:f\d+>> FloatConstant 99.25
/// CHECK-DAG: Return [<<Const>>]
- /// CHECK-START: float Main.ReturnFloat99P25() constant_folding_after_inlining (after)
+ /// CHECK-START: float Main.ReturnFloat99P25() constant_folding$after_inlining (after)
/// CHECK-NOT: TypeConversion
public static float ReturnFloat99P25() {
@@ -1478,12 +1478,12 @@
return (float) imm;
}
- /// CHECK-START: double Main.ReturnDouble33() constant_folding_after_inlining (before)
+ /// CHECK-START: double Main.ReturnDouble33() constant_folding$after_inlining (before)
/// CHECK-DAG: <<Const33:i\d+>> IntConstant 33
/// CHECK-DAG: <<Convert:d\d+>> TypeConversion [<<Const33>>]
/// CHECK-DAG: Return [<<Convert>>]
- /// CHECK-START: double Main.ReturnDouble33() constant_folding_after_inlining (after)
+ /// CHECK-START: double Main.ReturnDouble33() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const33:d\d+>> DoubleConstant 33
/// CHECK-DAG: Return [<<Const33>>]
@@ -1492,16 +1492,16 @@
return (double) imm;
}
- /// CHECK-START: double Main.ReturnDouble34() constant_folding_after_inlining (before)
+ /// CHECK-START: double Main.ReturnDouble34() constant_folding$after_inlining (before)
/// CHECK-DAG: <<Const34:j\d+>> LongConstant 34
/// CHECK-DAG: <<Convert:d\d+>> TypeConversion [<<Const34>>]
/// CHECK-DAG: Return [<<Convert>>]
- /// CHECK-START: double Main.ReturnDouble34() constant_folding_after_inlining (after)
+ /// CHECK-START: double Main.ReturnDouble34() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const34:d\d+>> DoubleConstant 34
/// CHECK-DAG: Return [<<Const34>>]
- /// CHECK-START: double Main.ReturnDouble34() constant_folding_after_inlining (after)
+ /// CHECK-START: double Main.ReturnDouble34() constant_folding$after_inlining (after)
/// CHECK-NOT: TypeConversion
public static double ReturnDouble34() {
@@ -1509,16 +1509,16 @@
return (double) imm;
}
- /// CHECK-START: double Main.ReturnDouble99P25() constant_folding_after_inlining (before)
+ /// CHECK-START: double Main.ReturnDouble99P25() constant_folding$after_inlining (before)
/// CHECK-DAG: <<Const:f\d+>> FloatConstant 99.25
/// CHECK-DAG: <<Convert:d\d+>> TypeConversion [<<Const>>]
/// CHECK-DAG: Return [<<Convert>>]
- /// CHECK-START: double Main.ReturnDouble99P25() constant_folding_after_inlining (after)
+ /// CHECK-START: double Main.ReturnDouble99P25() constant_folding$after_inlining (after)
/// CHECK-DAG: <<Const:d\d+>> DoubleConstant 99.25
/// CHECK-DAG: Return [<<Const>>]
- /// CHECK-START: double Main.ReturnDouble99P25() constant_folding_after_inlining (after)
+ /// CHECK-START: double Main.ReturnDouble99P25() constant_folding$after_inlining (after)
/// CHECK-NOT: TypeConversion
public static double ReturnDouble99P25() {
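
All of the CHECK-START renames in this file track one convention change: a repeated instance of an optimization pass is now named with a $-separated suffix (constant_folding$after_inlining) instead of an underscore (constant_folding_after_inlining). A sketch of the resulting annotation form, assuming the same Checker syntax as the tests above; the method itself is illustrative, not taken from the test:

    class CheckerSketch {
        /// CHECK-START: int CheckerSketch.addConstants() constant_folding$after_inlining (after)
        /// CHECK-DAG: <<Const3:i\d+>> IntConstant 3
        /// CHECK-DAG:                 Return [<<Const3>>]
        public static int addConstants() {
            int a = 1;
            int b = 2;
            return a + b;  // folded to the constant 3 once inlining has run
        }
    }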
diff --git a/test/449-checker-bce/src/Main.java b/test/449-checker-bce/src/Main.java
index c125e33..3a56c3b 100644
--- a/test/449-checker-bce/src/Main.java
+++ b/test/449-checker-bce/src/Main.java
@@ -1380,7 +1380,7 @@
/// CHECK-NOT: BoundsCheck
/// CHECK: ArrayGet
- /// CHECK-START: void Main.foo9(int[], boolean) instruction_simplifier_after_bce (after)
+ /// CHECK-START: void Main.foo9(int[], boolean) instruction_simplifier$after_bce (after)
// Simplification removes the redundant check
/// CHECK: Deoptimize
/// CHECK: Deoptimize
diff --git a/test/450-checker-types/src/Main.java b/test/450-checker-types/src/Main.java
index 36f14d8..6e453af 100644
--- a/test/450-checker-types/src/Main.java
+++ b/test/450-checker-types/src/Main.java
@@ -103,7 +103,7 @@
/// CHECK-NOT: CheckCast
public String testClassRemove() {
Object s = SubclassA.class;
- return ((Class)s).getName();
+ return ((Class<?>)s).getName();
}
/// CHECK-START: java.lang.String Main.testClassKeep() instruction_simplifier (before)
@@ -214,11 +214,11 @@
/// CHECK-DAG: <<IOf:z\d+>> InstanceOf
/// CHECK-DAG: If [<<IOf>>]
- /// CHECK-START: void Main.testInstanceOf_Inlined(java.lang.Object) instruction_simplifier_after_bce (before)
+ /// CHECK-START: void Main.testInstanceOf_Inlined(java.lang.Object) instruction_simplifier$after_bce (before)
/// CHECK: CheckCast
/// CHECK-NOT: CheckCast
- /// CHECK-START: void Main.testInstanceOf_Inlined(java.lang.Object) instruction_simplifier_after_bce (after)
+ /// CHECK-START: void Main.testInstanceOf_Inlined(java.lang.Object) instruction_simplifier$after_bce (after)
/// CHECK-NOT: CheckCast
public void testInstanceOf_Inlined(Object o) {
if (!$inline$InstanceofSubclassC(o)) {
diff --git a/test/458-checker-instruction-simplification/src/Main.java b/test/458-checker-instruction-simplification/src/Main.java
index 040479e..5b14735 100644
--- a/test/458-checker-instruction-simplification/src/Main.java
+++ b/test/458-checker-instruction-simplification/src/Main.java
@@ -876,7 +876,7 @@
/// CHECK-NOT: Neg
/// CHECK-NOT: Add
- /// CHECK-START: int Main.$noinline$NegNeg2(int) constant_folding_after_inlining (after)
+ /// CHECK-START: int Main.$noinline$NegNeg2(int) constant_folding$after_inlining (after)
/// CHECK: <<Const0:i\d+>> IntConstant 0
/// CHECK-NOT: Neg
/// CHECK-NOT: Add
@@ -1126,7 +1126,7 @@
return res;
}
- /// CHECK-START: boolean Main.$noinline$EqualBoolVsIntConst(boolean) instruction_simplifier_after_bce (before)
+ /// CHECK-START: boolean Main.$noinline$EqualBoolVsIntConst(boolean) instruction_simplifier$after_bce (before)
/// CHECK-DAG: <<Arg:z\d+>> ParameterValue
/// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
/// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
@@ -1136,7 +1136,7 @@
/// CHECK-DAG: <<NotCond:i\d+>> Select [<<Const1>>,<<Const0>>,<<Cond>>]
/// CHECK-DAG: Return [<<NotCond>>]
- /// CHECK-START: boolean Main.$noinline$EqualBoolVsIntConst(boolean) instruction_simplifier_after_bce (after)
+ /// CHECK-START: boolean Main.$noinline$EqualBoolVsIntConst(boolean) instruction_simplifier$after_bce (after)
/// CHECK-DAG: <<True:i\d+>> IntConstant 1
/// CHECK-DAG: Return [<<True>>]
@@ -1151,7 +1151,7 @@
return arg;
}
- /// CHECK-START: boolean Main.$noinline$NotEqualBoolVsIntConst(boolean) instruction_simplifier_after_bce (before)
+ /// CHECK-START: boolean Main.$noinline$NotEqualBoolVsIntConst(boolean) instruction_simplifier$after_bce (before)
/// CHECK-DAG: <<Arg:z\d+>> ParameterValue
/// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
/// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
@@ -1161,7 +1161,7 @@
/// CHECK-DAG: <<NotCond:i\d+>> Select [<<Const1>>,<<Const0>>,<<Cond>>]
/// CHECK-DAG: Return [<<NotCond>>]
- /// CHECK-START: boolean Main.$noinline$NotEqualBoolVsIntConst(boolean) instruction_simplifier_after_bce (after)
+ /// CHECK-START: boolean Main.$noinline$NotEqualBoolVsIntConst(boolean) instruction_simplifier$after_bce (after)
/// CHECK-DAG: <<False:i\d+>> IntConstant 0
/// CHECK-DAG: Return [<<False>>]
@@ -1178,7 +1178,7 @@
* remove the second.
*/
- /// CHECK-START: boolean Main.$noinline$NotNotBool(boolean) instruction_simplifier_after_bce (before)
+ /// CHECK-START: boolean Main.$noinline$NotNotBool(boolean) instruction_simplifier$after_bce (before)
/// CHECK-DAG: <<Arg:z\d+>> ParameterValue
/// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
/// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
@@ -1186,7 +1186,7 @@
/// CHECK-DAG: <<NotNotArg:i\d+>> Select [<<Const1>>,<<Const0>>,<<NotArg>>]
/// CHECK-DAG: Return [<<NotNotArg>>]
- /// CHECK-START: boolean Main.$noinline$NotNotBool(boolean) instruction_simplifier_after_bce (after)
+ /// CHECK-START: boolean Main.$noinline$NotNotBool(boolean) instruction_simplifier$after_bce (after)
/// CHECK-DAG: <<Arg:z\d+>> ParameterValue
/// CHECK-DAG: Return [<<Arg>>]
@@ -1317,7 +1317,7 @@
return arg * 31;
}
- /// CHECK-START: int Main.$noinline$booleanFieldNotEqualOne() instruction_simplifier_after_bce (before)
+ /// CHECK-START: int Main.$noinline$booleanFieldNotEqualOne() instruction_simplifier$after_bce (before)
/// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
/// CHECK-DAG: <<Const13:i\d+>> IntConstant 13
/// CHECK-DAG: <<Const54:i\d+>> IntConstant 54
@@ -1327,7 +1327,7 @@
/// CHECK-DAG: <<Select:i\d+>> Select [<<Const13>>,<<Const54>>,<<NE>>]
/// CHECK-DAG: Return [<<Select>>]
- /// CHECK-START: int Main.$noinline$booleanFieldNotEqualOne() instruction_simplifier_after_bce (after)
+ /// CHECK-START: int Main.$noinline$booleanFieldNotEqualOne() instruction_simplifier$after_bce (after)
/// CHECK-DAG: <<doThrow:z\d+>> StaticFieldGet
/// CHECK-DAG: <<Field:z\d+>> StaticFieldGet
/// CHECK-DAG: <<Const13:i\d+>> IntConstant 13
@@ -1340,7 +1340,7 @@
return (booleanField == $inline$true()) ? 13 : 54;
}
- /// CHECK-START: int Main.$noinline$booleanFieldEqualZero() instruction_simplifier_after_bce (before)
+ /// CHECK-START: int Main.$noinline$booleanFieldEqualZero() instruction_simplifier$after_bce (before)
/// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
/// CHECK-DAG: <<Const13:i\d+>> IntConstant 13
/// CHECK-DAG: <<Const54:i\d+>> IntConstant 54
@@ -1350,7 +1350,7 @@
/// CHECK-DAG: <<Select:i\d+>> Select [<<Const13>>,<<Const54>>,<<NE>>]
/// CHECK-DAG: Return [<<Select>>]
- /// CHECK-START: int Main.$noinline$booleanFieldEqualZero() instruction_simplifier_after_bce (after)
+ /// CHECK-START: int Main.$noinline$booleanFieldEqualZero() instruction_simplifier$after_bce (after)
/// CHECK-DAG: <<doThrow:z\d+>> StaticFieldGet
/// CHECK-DAG: <<Field:z\d+>> StaticFieldGet
/// CHECK-DAG: <<Const13:i\d+>> IntConstant 13
@@ -1363,7 +1363,7 @@
return (booleanField != $inline$false()) ? 13 : 54;
}
- /// CHECK-START: int Main.$noinline$intConditionNotEqualOne(int) instruction_simplifier_after_bce (before)
+ /// CHECK-START: int Main.$noinline$intConditionNotEqualOne(int) instruction_simplifier$after_bce (before)
/// CHECK-DAG: <<Arg:i\d+>> ParameterValue
/// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
/// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
@@ -1376,7 +1376,7 @@
/// CHECK-DAG: <<Result:i\d+>> Select [<<Const13>>,<<Const54>>,<<NE>>]
/// CHECK-DAG: Return [<<Result>>]
- /// CHECK-START: int Main.$noinline$intConditionNotEqualOne(int) instruction_simplifier_after_bce (after)
+ /// CHECK-START: int Main.$noinline$intConditionNotEqualOne(int) instruction_simplifier$after_bce (after)
/// CHECK-DAG: <<Arg:i\d+>> ParameterValue
/// CHECK-DAG: <<Const13:i\d+>> IntConstant 13
/// CHECK-DAG: <<Const42:i\d+>> IntConstant 42
@@ -1392,7 +1392,7 @@
return ((i > 42) == $inline$true()) ? 13 : 54;
}
- /// CHECK-START: int Main.$noinline$intConditionEqualZero(int) instruction_simplifier_after_bce (before)
+ /// CHECK-START: int Main.$noinline$intConditionEqualZero(int) instruction_simplifier$after_bce (before)
/// CHECK-DAG: <<Arg:i\d+>> ParameterValue
/// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
/// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
@@ -1405,7 +1405,7 @@
/// CHECK-DAG: <<Result:i\d+>> Select [<<Const13>>,<<Const54>>,<<NE>>]
/// CHECK-DAG: Return [<<Result>>]
- /// CHECK-START: int Main.$noinline$intConditionEqualZero(int) instruction_simplifier_after_bce (after)
+ /// CHECK-START: int Main.$noinline$intConditionEqualZero(int) instruction_simplifier$after_bce (after)
/// CHECK-DAG: <<Arg:i\d+>> ParameterValue
/// CHECK-DAG: <<Const13:i\d+>> IntConstant 13
/// CHECK-DAG: <<Const42:i\d+>> IntConstant 42
@@ -1426,7 +1426,7 @@
/// CHECK-START: int Main.$noinline$floatConditionNotEqualOne(float) builder (after)
/// CHECK: LessThanOrEqual
- /// CHECK-START: int Main.$noinline$floatConditionNotEqualOne(float) instruction_simplifier_before_codegen (after)
+ /// CHECK-START: int Main.$noinline$floatConditionNotEqualOne(float) instruction_simplifier$before_codegen (after)
/// CHECK-DAG: <<Arg:f\d+>> ParameterValue
/// CHECK-DAG: <<Const13:i\d+>> IntConstant 13
/// CHECK-DAG: <<Const54:i\d+>> IntConstant 54
@@ -1443,7 +1443,7 @@
/// CHECK-START: int Main.$noinline$doubleConditionEqualZero(double) builder (after)
/// CHECK: LessThanOrEqual
- /// CHECK-START: int Main.$noinline$doubleConditionEqualZero(double) instruction_simplifier_before_codegen (after)
+ /// CHECK-START: int Main.$noinline$doubleConditionEqualZero(double) instruction_simplifier$before_codegen (after)
/// CHECK-DAG: <<Arg:d\d+>> ParameterValue
/// CHECK-DAG: <<Const13:i\d+>> IntConstant 13
/// CHECK-DAG: <<Const54:i\d+>> IntConstant 54
@@ -1859,7 +1859,7 @@
if (doThrow) { throw new Error(); }
try {
Class<?> c = Class.forName("SmaliTests");
- Method m = c.getMethod(name, new Class[] { boolean.class });
+ Method m = c.getMethod(name, boolean.class);
return (Integer) m.invoke(null, input);
} catch (Exception ex) {
throw new Error(ex);
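
The getMethod change above works because Class.getMethod(String, Class<?>...) is a varargs method, so the explicit Class[] wrapper is redundant. A small runnable sketch (java.lang.String used only for illustration):

    import java.lang.reflect.Method;

    class VarargsSketch {
        public static void main(String[] args) throws Exception {
            // Both lookups resolve the same method; the array form and the
            // varargs form are equivalent calls.
            Method a = String.class.getMethod("substring", new Class[] { int.class });
            Method b = String.class.getMethod("substring", int.class);
            System.out.println(a.equals(b));  // true
        }
    }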
diff --git a/test/462-checker-inlining-across-dex-files/src-multidex/OtherDex.java b/test/462-checker-inlining-across-dex-files/src-multidex/OtherDex.java
index 171ade8..2056e2f 100644
--- a/test/462-checker-inlining-across-dex-files/src-multidex/OtherDex.java
+++ b/test/462-checker-inlining-across-dex-files/src-multidex/OtherDex.java
@@ -38,25 +38,25 @@
return "OtherDex";
}
- public static Class returnOtherDexClass() {
+ public static Class<?> returnOtherDexClass() {
return OtherDex.class;
}
- public static Class returnMainClass() {
+ public static Class<?> returnMainClass() {
return Main.class;
}
- private static Class returnOtherDexClass2() {
+ private static Class<?> returnOtherDexClass2() {
return OtherDex.class;
}
- public static Class returnOtherDexClassStaticCall() {
+ public static Class<?> returnOtherDexClassStaticCall() {
// Do not call returnOtherDexClass, as it may have been flagged
// as non-inlineable.
return returnOtherDexClass2();
}
- public static Class returnOtherDexCallingMain() {
+ public static Class<?> returnOtherDexCallingMain() {
return Main.getOtherClass();
}
diff --git a/test/462-checker-inlining-across-dex-files/src/Main.java b/test/462-checker-inlining-across-dex-files/src/Main.java
index 1fe49a8..c2bb479 100644
--- a/test/462-checker-inlining-across-dex-files/src/Main.java
+++ b/test/462-checker-inlining-across-dex-files/src/Main.java
@@ -106,7 +106,7 @@
/// CHECK-DAG: <<Invoke:l\d+>> InvokeStaticOrDirect
/// CHECK-DAG: Return [<<Invoke>>]
- public static Class dontInlineOtherDexClass() {
+ public static Class<?> dontInlineOtherDexClass() {
return OtherDex.returnOtherDexClass();
}
@@ -123,7 +123,7 @@
// Note: There are two LoadClass instructions. We obtain the correct
// instruction id by matching the Return's input list first.
- public static Class inlineMainClass() {
+ public static Class<?> inlineMainClass() {
return OtherDex.returnMainClass();
}
@@ -135,7 +135,7 @@
/// CHECK-DAG: <<Invoke:l\d+>> InvokeStaticOrDirect
/// CHECK-DAG: Return [<<Invoke>>]
- public static Class dontInlineOtherDexClassStaticCall() {
+ public static Class<?> dontInlineOtherDexClassStaticCall() {
return OtherDex.returnOtherDexClassStaticCall();
}
@@ -152,11 +152,11 @@
// Note: There are two LoadClass instructions. We obtain the correct
// instruction id by matching the Return's input list first.
- public static Class inlineOtherDexCallingMain() {
+ public static Class<?> inlineOtherDexCallingMain() {
return OtherDex.returnOtherDexCallingMain();
}
- public static Class getOtherClass() {
+ public static Class<?> getOtherClass() {
return Main.class;
}
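[Editor's note: the Class -> Class<?> changes in this file and the later ones replace the raw type with the unbounded wildcard. Both erase to the same runtime type; the wildcard just keeps the compiler's generic checks and silences rawtypes lint. A minimal sketch, using java.lang.StringBuilder only as an arbitrary loadable class:

    public class WildcardDemo {
      public static void main(String[] args) throws Exception {
        // Class<?> reads as "class of some unknown type".
        Class<?> c = Class.forName("java.lang.StringBuilder");
        Object o = c.getDeclaredConstructor().newInstance();  // no unchecked warning
        System.out.println(o.getClass().getName());           // java.lang.StringBuilder
        // "Class c = ..." would also compile, but javac -Xlint:rawtypes flags it.
      }
    }
]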
diff --git a/test/471-uninitialized-locals/src/Main.java b/test/471-uninitialized-locals/src/Main.java
index a5b1c48..1ac749e 100644
--- a/test/471-uninitialized-locals/src/Main.java
+++ b/test/471-uninitialized-locals/src/Main.java
@@ -24,8 +24,8 @@
public static void main(String args[]) throws Exception {
try {
Class<?> c = Class.forName("Test");
- Method m = c.getMethod("ThrowException", (Class[]) null);
- m.invoke(null, (Object[]) null);
+ Method m = c.getMethod("ThrowException");
+ m.invoke(null);
} catch (VerifyError e) {
// Compilation should go fine but we expect the runtime verification to fail.
return;
diff --git a/test/472-unreachable-if-regression/src/Main.java b/test/472-unreachable-if-regression/src/Main.java
index c9f9511..d426df1 100644
--- a/test/472-unreachable-if-regression/src/Main.java
+++ b/test/472-unreachable-if-regression/src/Main.java
@@ -25,12 +25,12 @@
System.out.println("Test started.");
Class<?> c = Class.forName("Test");
- Method unreachableIf = c.getMethod("UnreachableIf", (Class[]) null);
- unreachableIf.invoke(null, (Object[]) null);
+ Method unreachableIf = c.getMethod("UnreachableIf");
+ unreachableIf.invoke(null);
System.out.println("Successfully called UnreachableIf().");
- Method unreachablePackedSwitch = c.getMethod("UnreachablePackedSwitch", (Class[]) null);
- unreachablePackedSwitch.invoke(null, (Object[]) null);
+ Method unreachablePackedSwitch = c.getMethod("UnreachablePackedSwitch");
+ unreachablePackedSwitch.invoke(null);
System.out.println("Successfully called UnreachablePackedSwitch().");
}
diff --git a/test/480-checker-dead-blocks/src/Main.java b/test/480-checker-dead-blocks/src/Main.java
index e5171f0..141054d 100644
--- a/test/480-checker-dead-blocks/src/Main.java
+++ b/test/480-checker-dead-blocks/src/Main.java
@@ -30,7 +30,7 @@
return false;
}
- /// CHECK-START: int Main.testTrueBranch(int, int) dead_code_elimination_final (before)
+ /// CHECK-START: int Main.testTrueBranch(int, int) dead_code_elimination$final (before)
/// CHECK-DAG: <<ArgX:i\d+>> ParameterValue
/// CHECK-DAG: <<ArgY:i\d+>> ParameterValue
/// CHECK-DAG: If
@@ -39,13 +39,13 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi [<<Add>>,<<Sub>>]
/// CHECK-DAG: Return [<<Phi>>]
- /// CHECK-START: int Main.testTrueBranch(int, int) dead_code_elimination_final (after)
+ /// CHECK-START: int Main.testTrueBranch(int, int) dead_code_elimination$final (after)
/// CHECK-DAG: <<ArgX:i\d+>> ParameterValue
/// CHECK-DAG: <<ArgY:i\d+>> ParameterValue
/// CHECK-DAG: <<Add:i\d+>> Add [<<ArgX>>,<<ArgY>>]
/// CHECK-DAG: Return [<<Add>>]
- /// CHECK-START: int Main.testTrueBranch(int, int) dead_code_elimination_final (after)
+ /// CHECK-START: int Main.testTrueBranch(int, int) dead_code_elimination$final (after)
/// CHECK-NOT: If
/// CHECK-NOT: Sub
/// CHECK-NOT: Phi
@@ -62,7 +62,7 @@
return z;
}
- /// CHECK-START: int Main.testFalseBranch(int, int) dead_code_elimination_final (before)
+ /// CHECK-START: int Main.testFalseBranch(int, int) dead_code_elimination$final (before)
/// CHECK-DAG: <<ArgX:i\d+>> ParameterValue
/// CHECK-DAG: <<ArgY:i\d+>> ParameterValue
/// CHECK-DAG: If
@@ -71,13 +71,13 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi [<<Add>>,<<Sub>>]
/// CHECK-DAG: Return [<<Phi>>]
- /// CHECK-START: int Main.testFalseBranch(int, int) dead_code_elimination_final (after)
+ /// CHECK-START: int Main.testFalseBranch(int, int) dead_code_elimination$final (after)
/// CHECK-DAG: <<ArgX:i\d+>> ParameterValue
/// CHECK-DAG: <<ArgY:i\d+>> ParameterValue
/// CHECK-DAG: <<Sub:i\d+>> Sub [<<ArgX>>,<<ArgY>>]
/// CHECK-DAG: Return [<<Sub>>]
- /// CHECK-START: int Main.testFalseBranch(int, int) dead_code_elimination_final (after)
+ /// CHECK-START: int Main.testFalseBranch(int, int) dead_code_elimination$final (after)
/// CHECK-NOT: If
/// CHECK-NOT: Add
/// CHECK-NOT: Phi
@@ -94,10 +94,10 @@
return z;
}
- /// CHECK-START: int Main.testRemoveLoop(int) dead_code_elimination_final (before)
+ /// CHECK-START: int Main.testRemoveLoop(int) dead_code_elimination$final (before)
/// CHECK: Mul
- /// CHECK-START: int Main.testRemoveLoop(int) dead_code_elimination_final (after)
+ /// CHECK-START: int Main.testRemoveLoop(int) dead_code_elimination$final (after)
/// CHECK-NOT: Mul
public static int testRemoveLoop(int x) {
@@ -109,11 +109,11 @@
return x;
}
- /// CHECK-START: int Main.testInfiniteLoop(int) dead_code_elimination_final (before)
+ /// CHECK-START: int Main.testInfiniteLoop(int) dead_code_elimination$final (before)
/// CHECK-DAG: Return
/// CHECK-DAG: Exit
- /// CHECK-START: int Main.testInfiniteLoop(int) dead_code_elimination_final (after)
+ /// CHECK-START: int Main.testInfiniteLoop(int) dead_code_elimination$final (after)
/// CHECK-NOT: Return
/// CHECK-NOT: Exit
@@ -124,15 +124,15 @@
return x;
}
- /// CHECK-START: int Main.testDeadLoop(int) dead_code_elimination_final (before)
+ /// CHECK-START: int Main.testDeadLoop(int) dead_code_elimination$final (before)
/// CHECK-DAG: If
/// CHECK-DAG: Add
- /// CHECK-START: int Main.testDeadLoop(int) dead_code_elimination_final (after)
+ /// CHECK-START: int Main.testDeadLoop(int) dead_code_elimination$final (after)
/// CHECK-DAG: <<Arg:i\d+>> ParameterValue
/// CHECK-DAG: Return [<<Arg>>]
- /// CHECK-START: int Main.testDeadLoop(int) dead_code_elimination_final (after)
+ /// CHECK-START: int Main.testDeadLoop(int) dead_code_elimination$final (after)
/// CHECK-NOT: If
/// CHECK-NOT: Add
@@ -143,16 +143,16 @@
return x;
}
- /// CHECK-START: int Main.testUpdateLoopInformation(int) dead_code_elimination_final (before)
+ /// CHECK-START: int Main.testUpdateLoopInformation(int) dead_code_elimination$final (before)
/// CHECK-DAG: If
/// CHECK-DAG: If
/// CHECK-DAG: Add
- /// CHECK-START: int Main.testUpdateLoopInformation(int) dead_code_elimination_final (after)
+ /// CHECK-START: int Main.testUpdateLoopInformation(int) dead_code_elimination$final (after)
/// CHECK-DAG: <<Arg:i\d+>> ParameterValue
/// CHECK-DAG: Return [<<Arg>>]
- /// CHECK-START: int Main.testUpdateLoopInformation(int) dead_code_elimination_final (after)
+ /// CHECK-START: int Main.testUpdateLoopInformation(int) dead_code_elimination$final (after)
/// CHECK-NOT: If
/// CHECK-NOT: Add
@@ -165,13 +165,13 @@
return x;
}
- /// CHECK-START: int Main.testRemoveSuspendCheck(int, int) dead_code_elimination_final (before)
+ /// CHECK-START: int Main.testRemoveSuspendCheck(int, int) dead_code_elimination$final (before)
/// CHECK: SuspendCheck
/// CHECK: SuspendCheck
/// CHECK: SuspendCheck
/// CHECK-NOT: SuspendCheck
- /// CHECK-START: int Main.testRemoveSuspendCheck(int, int) dead_code_elimination_final (after)
+ /// CHECK-START: int Main.testRemoveSuspendCheck(int, int) dead_code_elimination$final (after)
/// CHECK: SuspendCheck
/// CHECK: SuspendCheck
/// CHECK-NOT: SuspendCheck
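[Editor's note: the $initial / $final suffixes above name individual instances of a pass the optimizing compiler runs more than once. The testTrueBranch assertions describe this code shape: once inlining makes the condition constant, the final DCE instance folds the branch, leaving only the Add. A reconstruction of the pattern under test, not the file's exact text:

    public class DceSketch {
      static boolean inlineTrue() { return true; }  // trivially inlined

      static int testTrueBranch(int x, int y) {
        int z;
        if (inlineTrue()) {
          z = x + y;  // survives: the Add in the (after) assertions
        } else {
          z = x - y;  // dead once the condition is constant: If, Sub, Phi all removed
        }
        return z;
      }
    }
]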
diff --git a/test/485-checker-dce-loop-update/smali/TestCase.smali b/test/485-checker-dce-loop-update/smali/TestCase.smali
index 056f22c..e3617c7 100644
--- a/test/485-checker-dce-loop-update/smali/TestCase.smali
+++ b/test/485-checker-dce-loop-update/smali/TestCase.smali
@@ -23,7 +23,7 @@
.end method
-## CHECK-START: int TestCase.testSingleExit(int, boolean) dead_code_elimination_final (before)
+## CHECK-START: int TestCase.testSingleExit(int, boolean) dead_code_elimination$final (before)
## CHECK-DAG: <<ArgX:i\d+>> ParameterValue
## CHECK-DAG: <<ArgY:z\d+>> ParameterValue
## CHECK-DAG: <<Cst1:i\d+>> IntConstant 1
@@ -36,7 +36,7 @@
## CHECK-DAG: <<Add7>> Add [<<PhiX>>,<<Cst7>>] loop:<<HeaderY>>
## CHECK-DAG: Return [<<PhiX>>] loop:none
-## CHECK-START: int TestCase.testSingleExit(int, boolean) dead_code_elimination_final (after)
+## CHECK-START: int TestCase.testSingleExit(int, boolean) dead_code_elimination$final (after)
## CHECK-DAG: <<ArgX:i\d+>> ParameterValue
## CHECK-DAG: <<ArgY:z\d+>> ParameterValue
## CHECK-DAG: <<Cst7:i\d+>> IntConstant 7
@@ -73,7 +73,7 @@
.end method
-## CHECK-START: int TestCase.testMultipleExits(int, boolean, boolean) dead_code_elimination_final (before)
+## CHECK-START: int TestCase.testMultipleExits(int, boolean, boolean) dead_code_elimination$final (before)
## CHECK-DAG: <<ArgX:i\d+>> ParameterValue
## CHECK-DAG: <<ArgY:z\d+>> ParameterValue
## CHECK-DAG: <<ArgZ:z\d+>> ParameterValue
@@ -88,7 +88,7 @@
## CHECK-DAG: <<Add7>> Add [<<PhiX>>,<<Cst7>>] loop:<<HeaderY>>
## CHECK-DAG: Return [<<PhiX>>] loop:none
-## CHECK-START: int TestCase.testMultipleExits(int, boolean, boolean) dead_code_elimination_final (after)
+## CHECK-START: int TestCase.testMultipleExits(int, boolean, boolean) dead_code_elimination$final (after)
## CHECK-DAG: <<ArgX:i\d+>> ParameterValue
## CHECK-DAG: <<ArgY:z\d+>> ParameterValue
## CHECK-DAG: <<ArgZ:z\d+>> ParameterValue
@@ -129,7 +129,7 @@
.end method
-## CHECK-START: int TestCase.testExitPredecessors(int, boolean, boolean) dead_code_elimination_final (before)
+## CHECK-START: int TestCase.testExitPredecessors(int, boolean, boolean) dead_code_elimination$final (before)
## CHECK-DAG: <<ArgX:i\d+>> ParameterValue
## CHECK-DAG: <<ArgY:z\d+>> ParameterValue
## CHECK-DAG: <<ArgZ:z\d+>> ParameterValue
@@ -146,7 +146,7 @@
## CHECK-DAG: <<Add7>> Add [<<PhiX>>,<<Cst7>>] loop:<<HeaderY>>
## CHECK-DAG: Return [<<SelX>>] loop:none
-## CHECK-START: int TestCase.testExitPredecessors(int, boolean, boolean) dead_code_elimination_final (after)
+## CHECK-START: int TestCase.testExitPredecessors(int, boolean, boolean) dead_code_elimination$final (after)
## CHECK-DAG: <<ArgX:i\d+>> ParameterValue
## CHECK-DAG: <<ArgY:z\d+>> ParameterValue
## CHECK-DAG: <<ArgZ:z\d+>> ParameterValue
@@ -194,7 +194,7 @@
.end method
-## CHECK-START: int TestCase.testInnerLoop(int, boolean, boolean) dead_code_elimination_final (before)
+## CHECK-START: int TestCase.testInnerLoop(int, boolean, boolean) dead_code_elimination$final (before)
## CHECK-DAG: <<ArgX:i\d+>> ParameterValue
## CHECK-DAG: <<ArgY:z\d+>> ParameterValue
## CHECK-DAG: <<ArgZ:z\d+>> ParameterValue
@@ -217,7 +217,7 @@
## CHECK-DAG: <<Add7>> Add [<<PhiX>>,<<Cst7>>] loop:<<HeaderY>>
## CHECK-DAG: Return [<<PhiX>>] loop:none
-## CHECK-START: int TestCase.testInnerLoop(int, boolean, boolean) dead_code_elimination_final (after)
+## CHECK-START: int TestCase.testInnerLoop(int, boolean, boolean) dead_code_elimination$final (after)
## CHECK-DAG: <<ArgX:i\d+>> ParameterValue
## CHECK-DAG: <<ArgY:z\d+>> ParameterValue
## CHECK-DAG: <<ArgZ:z\d+>> ParameterValue
diff --git a/test/485-checker-dce-switch/src/Main.java b/test/485-checker-dce-switch/src/Main.java
index 019d876..7d5fd4f 100644
--- a/test/485-checker-dce-switch/src/Main.java
+++ b/test/485-checker-dce-switch/src/Main.java
@@ -20,14 +20,14 @@
return 5;
}
- /// CHECK-START: int Main.wholeSwitchDead(int) dead_code_elimination_final (before)
+ /// CHECK-START: int Main.wholeSwitchDead(int) dead_code_elimination$final (before)
/// CHECK-DAG: PackedSwitch
- /// CHECK-START: int Main.wholeSwitchDead(int) dead_code_elimination_final (after)
+ /// CHECK-START: int Main.wholeSwitchDead(int) dead_code_elimination$final (after)
/// CHECK-DAG: <<Const100:i\d+>> IntConstant 100
/// CHECK-DAG: Return [<<Const100>>]
- /// CHECK-START: int Main.wholeSwitchDead(int) dead_code_elimination_final (after)
+ /// CHECK-START: int Main.wholeSwitchDead(int) dead_code_elimination$final (after)
/// CHECK-NOT: PackedSwitch
public static int wholeSwitchDead(int j) {
@@ -60,14 +60,14 @@
return l;
}
- /// CHECK-START: int Main.constantSwitch_InRange() dead_code_elimination_final (before)
+ /// CHECK-START: int Main.constantSwitch_InRange() dead_code_elimination$final (before)
/// CHECK-DAG: PackedSwitch
- /// CHECK-START: int Main.constantSwitch_InRange() dead_code_elimination_final (after)
+ /// CHECK-START: int Main.constantSwitch_InRange() dead_code_elimination$final (after)
/// CHECK-DAG: <<Const7:i\d+>> IntConstant 7
/// CHECK-DAG: Return [<<Const7>>]
- /// CHECK-START: int Main.constantSwitch_InRange() dead_code_elimination_final (after)
+ /// CHECK-START: int Main.constantSwitch_InRange() dead_code_elimination$final (after)
/// CHECK-NOT: PackedSwitch
public static int constantSwitch_InRange() {
@@ -96,14 +96,14 @@
return i;
}
- /// CHECK-START: int Main.constantSwitch_AboveRange() dead_code_elimination_final (before)
+ /// CHECK-START: int Main.constantSwitch_AboveRange() dead_code_elimination$final (before)
/// CHECK-DAG: PackedSwitch
- /// CHECK-START: int Main.constantSwitch_AboveRange() dead_code_elimination_final (after)
+ /// CHECK-START: int Main.constantSwitch_AboveRange() dead_code_elimination$final (after)
/// CHECK-DAG: <<Const15:i\d+>> IntConstant 15
/// CHECK-DAG: Return [<<Const15>>]
- /// CHECK-START: int Main.constantSwitch_AboveRange() dead_code_elimination_final (after)
+ /// CHECK-START: int Main.constantSwitch_AboveRange() dead_code_elimination$final (after)
/// CHECK-NOT: PackedSwitch
public static int constantSwitch_AboveRange() {
@@ -132,14 +132,14 @@
return i;
}
- /// CHECK-START: int Main.constantSwitch_BelowRange() dead_code_elimination_final (before)
+ /// CHECK-START: int Main.constantSwitch_BelowRange() dead_code_elimination$final (before)
/// CHECK-DAG: PackedSwitch
- /// CHECK-START: int Main.constantSwitch_BelowRange() dead_code_elimination_final (after)
+ /// CHECK-START: int Main.constantSwitch_BelowRange() dead_code_elimination$final (after)
/// CHECK-DAG: <<ConstM5:i\d+>> IntConstant -5
/// CHECK-DAG: Return [<<ConstM5>>]
- /// CHECK-START: int Main.constantSwitch_BelowRange() dead_code_elimination_final (after)
+ /// CHECK-START: int Main.constantSwitch_BelowRange() dead_code_elimination$final (after)
/// CHECK-NOT: PackedSwitch
public static int constantSwitch_BelowRange() {
diff --git a/test/489-current-method-regression/src/Main.java b/test/489-current-method-regression/src/Main.java
index 7d102f5..285c41d 100644
--- a/test/489-current-method-regression/src/Main.java
+++ b/test/489-current-method-regression/src/Main.java
@@ -23,7 +23,7 @@
if (a == 42) {
// The class loading will be seen as dead code by
// the optimizer.
- Class c = Main.class;
+ Class<?> c = Main.class;
}
return new Main().bar();
}
diff --git a/test/496-checker-inlining-and-class-loader/src/Main.java b/test/496-checker-inlining-and-class-loader/src/Main.java
index 78e8a40..15d4dc0 100644
--- a/test/496-checker-inlining-and-class-loader/src/Main.java
+++ b/test/496-checker-inlining-and-class-loader/src/Main.java
@@ -69,7 +69,7 @@
"loadClassBinaryName", String.class, ClassLoader.class, List.class);
if (dexFile != null) {
- Class clazz = (Class)method.invoke(dexFile, className, this, null);
+ Class<?> clazz = (Class<?>)method.invoke(dexFile, className, this, null);
if (clazz != null) {
return clazz;
}
@@ -124,7 +124,7 @@
public class Main {
public static void main(String[] args) throws Exception {
MyClassLoader o = new MyClassLoader();
- Class foo = o.loadClass("LoadedByMyClassLoader");
+ Class<?> foo = o.loadClass("LoadedByMyClassLoader");
Method m = foo.getDeclaredMethod("bar");
m.invoke(null);
}
diff --git a/test/497-inlining-and-class-loader/src/Main.java b/test/497-inlining-and-class-loader/src/Main.java
index 832b1f0..1e27e77 100644
--- a/test/497-inlining-and-class-loader/src/Main.java
+++ b/test/497-inlining-and-class-loader/src/Main.java
@@ -66,7 +66,7 @@
"loadClassBinaryName", String.class, ClassLoader.class, List.class);
if (dex != null) {
- Class clazz = (Class)method.invoke(dex, className, this, null);
+ Class<?> clazz = (Class<?>)method.invoke(dex, className, this, null);
if (clazz != null) {
return clazz;
}
@@ -92,7 +92,7 @@
MyClassLoader o = new MyClassLoader();
MyClassLoader.level1ClassLoader = new MyClassLoader();
- Class foo = o.loadClass("LoadedByMyClassLoader");
+ Class<?> foo = o.loadClass("LoadedByMyClassLoader");
Method m = foo.getDeclaredMethod("bar");
try {
m.invoke(null);
diff --git a/test/501-regression-packed-switch/src/Main.java b/test/501-regression-packed-switch/src/Main.java
index 12bc1a8..74c081a 100644
--- a/test/501-regression-packed-switch/src/Main.java
+++ b/test/501-regression-packed-switch/src/Main.java
@@ -24,12 +24,12 @@
public static void main(String args[]) throws Exception {
Class<?> c = Class.forName("Test");
- Method m = c.getMethod("EmptyPackedSwitch", new Class[] { int.class });
+ Method m = c.getMethod("EmptyPackedSwitch", int.class);
Integer result = (Integer) m.invoke(null, new Integer(42));
if (result != 5) {
throw new Error("Expected 5, got " + result);
}
- m = c.getMethod("PackedSwitchAfterData", new Class[] { int.class });
+ m = c.getMethod("PackedSwitchAfterData", int.class);
result = (Integer) m.invoke(null, new Integer(0));
if (result != 1) {
throw new Error("Expected 1, got " + result);
diff --git a/test/504-regression-baseline-entry/src/Main.java b/test/504-regression-baseline-entry/src/Main.java
index 2c9df28..284cbdc 100644
--- a/test/504-regression-baseline-entry/src/Main.java
+++ b/test/504-regression-baseline-entry/src/Main.java
@@ -24,7 +24,7 @@
public static void main(String args[]) throws Exception {
Class<?> c = Class.forName("Test");
- Method m = c.getMethod("SingleGotoStart", (Class[]) null);
+ Method m = c.getMethod("SingleGotoStart");
Integer result = (Integer) m.invoke(null);
if (result != 5) {
throw new Error("Expected 5, got " + result);
diff --git a/test/510-checker-try-catch/smali/Builder.smali b/test/510-checker-try-catch/smali/Builder.smali
index 733a1dd..b0bffa5 100644
--- a/test/510-checker-try-catch/smali/Builder.smali
+++ b/test/510-checker-try-catch/smali/Builder.smali
@@ -1360,7 +1360,7 @@
# Test that a throw-catch loop on monitor-exit is eliminated.
# Note that we do not test this until after DCE which merges trivially split blocks.
-## CHECK-START: int Builder.testSynchronized(java.lang.Object) dead_code_elimination (after)
+## CHECK-START: int Builder.testSynchronized(java.lang.Object) dead_code_elimination$initial (after)
## CHECK: flags "catch_block"
## CHECK-NOT: end_block
## CHECK: MonitorOperation kind:exit
diff --git a/test/510-checker-try-catch/src/Main.java b/test/510-checker-try-catch/src/Main.java
index 25cdc0e..d6dcd30 100644
--- a/test/510-checker-try-catch/src/Main.java
+++ b/test/510-checker-try-catch/src/Main.java
@@ -39,7 +39,7 @@
public static void testMethod(String method) throws Exception {
Class<?> c = Class.forName("Runtime");
- Method m = c.getMethod(method, new Class[] { boolean.class, boolean.class });
+ Method m = c.getMethod(method, boolean.class, boolean.class);
for (TestPath path : TestPath.values()) {
Object[] arguments = new Object[] { path.arg1, path.arg2 };
diff --git a/test/517-checker-builder-fallthrough/src/Main.java b/test/517-checker-builder-fallthrough/src/Main.java
index 23d94e6..14170f5 100644
--- a/test/517-checker-builder-fallthrough/src/Main.java
+++ b/test/517-checker-builder-fallthrough/src/Main.java
@@ -20,7 +20,7 @@
public static int runTest(int input) throws Exception {
Class<?> c = Class.forName("TestCase");
- Method m = c.getMethod("testCase", new Class[] { int.class });
+ Method m = c.getMethod("testCase", int.class);
return (Integer) m.invoke(null, input);
}
diff --git a/test/522-checker-regression-monitor-exit/smali/Test.smali b/test/522-checker-regression-monitor-exit/smali/Test.smali
index c8e9198..72583d2 100644
--- a/test/522-checker-regression-monitor-exit/smali/Test.smali
+++ b/test/522-checker-regression-monitor-exit/smali/Test.smali
@@ -17,11 +17,11 @@
.super Ljava/lang/Object;
-## CHECK-START: int Test.synchronizedHashCode(java.lang.Object) dead_code_elimination (before)
+## CHECK-START: int Test.synchronizedHashCode(java.lang.Object) dead_code_elimination$initial (before)
## CHECK: MonitorOperation [<<Param:l\d+>>] kind:enter
## CHECK: MonitorOperation [<<Param>>] kind:exit
-## CHECK-START: int Test.synchronizedHashCode(java.lang.Object) dead_code_elimination (after)
+## CHECK-START: int Test.synchronizedHashCode(java.lang.Object) dead_code_elimination$initial (after)
## CHECK: MonitorOperation [<<Param:l\d+>>] kind:enter
## CHECK: MonitorOperation [<<Param>>] kind:exit
diff --git a/test/522-checker-regression-monitor-exit/src/Main.java b/test/522-checker-regression-monitor-exit/src/Main.java
index c85ac96..a5e9512 100644
--- a/test/522-checker-regression-monitor-exit/src/Main.java
+++ b/test/522-checker-regression-monitor-exit/src/Main.java
@@ -40,7 +40,7 @@
Integer result;
try {
Class<?> c = Class.forName("Test");
- Method m = c.getMethod("synchronizedHashCode", new Class[] { Object.class });
+ Method m = c.getMethod("synchronizedHashCode", Object.class);
result = (Integer) m.invoke(null, m_obj);
} catch (Exception e) {
System.err.println("Hash code query exception");
diff --git a/test/527-checker-array-access-split/src/Main.java b/test/527-checker-array-access-split/src/Main.java
index 3366f20..9435ef1 100644
--- a/test/527-checker-array-access-split/src/Main.java
+++ b/test/527-checker-array-access-split/src/Main.java
@@ -189,7 +189,7 @@
/// CHECK: <<Address2:l\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>]
/// CHECK-NEXT: ArraySet [<<Address2>>,<<Index>>,<<Add>>]
- /// CHECK-START-ARM64: void Main.getSet(int[], int) GVN_after_arch (after)
+ /// CHECK-START-ARM64: void Main.getSet(int[], int) GVN$after_arch (after)
/// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
/// CHECK-DAG: <<DataOffset:i\d+>> IntConstant
/// CHECK: <<Array:l\d+>> NullCheck
@@ -220,7 +220,7 @@
/// CHECK: <<Address2:l\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>]
/// CHECK-NEXT: ArraySet [<<Address2>>,<<Index>>,<<Add>>]
- /// CHECK-START-ARM: void Main.getSet(int[], int) GVN_after_arch (after)
+ /// CHECK-START-ARM: void Main.getSet(int[], int) GVN$after_arch (after)
/// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
/// CHECK-DAG: <<DataOffset:i\d+>> IntConstant
/// CHECK: <<Array:l\d+>> NullCheck
@@ -260,7 +260,7 @@
/// CHECK: <<Address2:l\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>]
/// CHECK-NEXT: ArraySet [<<Address2>>,<<Index>>,<<Add>>]
- /// CHECK-START-ARM64: int[] Main.accrossGC(int[], int) GVN_after_arch (after)
+ /// CHECK-START-ARM64: int[] Main.accrossGC(int[], int) GVN$after_arch (after)
/// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
/// CHECK-DAG: <<DataOffset:i\d+>> IntConstant
/// CHECK: <<Array:l\d+>> NullCheck
@@ -294,7 +294,7 @@
/// CHECK: <<Address2:l\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>]
/// CHECK-NEXT: ArraySet [<<Address2>>,<<Index>>,<<Add>>]
- /// CHECK-START-ARM: int[] Main.accrossGC(int[], int) GVN_after_arch (after)
+ /// CHECK-START-ARM: int[] Main.accrossGC(int[], int) GVN$after_arch (after)
/// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
/// CHECK-DAG: <<DataOffset:i\d+>> IntConstant
/// CHECK: <<Array:l\d+>> NullCheck
@@ -349,7 +349,7 @@
/// CHECK: <<Address2:l\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>]
/// CHECK-NEXT: ArraySet [<<Address2>>,<<Index>>,<<Add>>]
- /// CHECK-START-ARM64: int Main.canMergeAfterBCE1() GVN_after_arch (after)
+ /// CHECK-START-ARM64: int Main.canMergeAfterBCE1() GVN$after_arch (after)
/// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
/// CHECK-DAG: <<DataOffset:i\d+>> IntConstant 12
/// CHECK: <<Array:l\d+>> NewArray
@@ -386,7 +386,7 @@
/// CHECK: <<Address2:l\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>]
/// CHECK-NEXT: ArraySet [<<Address2>>,<<Index>>,<<Add>>]
- /// CHECK-START-ARM: int Main.canMergeAfterBCE1() GVN_after_arch (after)
+ /// CHECK-START-ARM: int Main.canMergeAfterBCE1() GVN$after_arch (after)
/// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
/// CHECK-DAG: <<DataOffset:i\d+>> IntConstant 12
/// CHECK: <<Array:l\d+>> NewArray
@@ -445,7 +445,7 @@
/// CHECK: <<Address3:l\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>]
/// CHECK: ArraySet [<<Address3>>,<<Index1>>,<<Add>>]
- /// CHECK-START-ARM64: int Main.canMergeAfterBCE2() GVN_after_arch (after)
+ /// CHECK-START-ARM64: int Main.canMergeAfterBCE2() GVN$after_arch (after)
/// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
/// CHECK-DAG: <<DataOffset:i\d+>> IntConstant 12
/// CHECK: <<Array:l\d+>> NewArray
@@ -461,7 +461,7 @@
// There should be only one intermediate address computation in the loop.
- /// CHECK-START-ARM64: int Main.canMergeAfterBCE2() GVN_after_arch (after)
+ /// CHECK-START-ARM64: int Main.canMergeAfterBCE2() GVN$after_arch (after)
/// CHECK: IntermediateAddress
/// CHECK-NOT: IntermediateAddress
@@ -494,7 +494,7 @@
/// CHECK: <<Address3:l\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>]
/// CHECK: ArraySet [<<Address3>>,<<Index1>>,<<Add>>]
- /// CHECK-START-ARM: int Main.canMergeAfterBCE2() GVN_after_arch (after)
+ /// CHECK-START-ARM: int Main.canMergeAfterBCE2() GVN$after_arch (after)
/// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
/// CHECK-DAG: <<DataOffset:i\d+>> IntConstant 12
/// CHECK: <<Array:l\d+>> NewArray
@@ -508,7 +508,7 @@
/// CHECK: <<Add:i\d+>> Add [<<ArrayGetI>>,<<ArrayGetI1>>]
/// CHECK: ArraySet [<<Address>>,<<Index1>>,<<Add>>]
- /// CHECK-START-ARM: int Main.canMergeAfterBCE2() GVN_after_arch (after)
+ /// CHECK-START-ARM: int Main.canMergeAfterBCE2() GVN$after_arch (after)
/// CHECK: IntermediateAddress
/// CHECK-NOT: IntermediateAddress
diff --git a/test/530-checker-loops3/src/Main.java b/test/530-checker-loops3/src/Main.java
index 5ffcbe9..6b5c657 100644
--- a/test/530-checker-loops3/src/Main.java
+++ b/test/530-checker-loops3/src/Main.java
@@ -132,7 +132,7 @@
/// CHECK-DAG: Deoptimize loop:none
/// CHECK-NOT: Deoptimize
//
- /// CHECK-START: void Main.multipleUnitStrides(int[], int[]) instruction_simplifier_after_bce (after)
+ /// CHECK-START: void Main.multipleUnitStrides(int[], int[]) instruction_simplifier$after_bce (after)
/// CHECK-DAG: Deoptimize loop:none
/// CHECK-DAG: Deoptimize loop:none
/// CHECK-DAG: Deoptimize loop:none
@@ -164,7 +164,7 @@
/// CHECK-DAG: Deoptimize loop:none
/// CHECK-NOT: Deoptimize
//
- /// CHECK-START: void Main.multipleUnitStridesConditional(int[], int[]) instruction_simplifier_after_bce (after)
+ /// CHECK-START: void Main.multipleUnitStridesConditional(int[], int[]) instruction_simplifier$after_bce (after)
/// CHECK-DAG: Deoptimize loop:none
/// CHECK-DAG: Deoptimize loop:none
/// CHECK-DAG: Deoptimize loop:none
@@ -196,7 +196,7 @@
/// CHECK-DAG: Deoptimize loop:none
/// CHECK-NOT: Deoptimize
//
- /// CHECK-START: void Main.shifter(int[]) instruction_simplifier_after_bce (after)
+ /// CHECK-START: void Main.shifter(int[]) instruction_simplifier$after_bce (after)
/// CHECK-DAG: Deoptimize loop:none
/// CHECK-DAG: Deoptimize loop:none
/// CHECK-NOT: Deoptimize
diff --git a/test/540-checker-rtp-bug/src/Main.java b/test/540-checker-rtp-bug/src/Main.java
index 17b11db..19b7fb7 100644
--- a/test/540-checker-rtp-bug/src/Main.java
+++ b/test/540-checker-rtp-bug/src/Main.java
@@ -48,7 +48,7 @@
/// CHECK: <<Class:l\d+>> LoadClass
/// CHECK: InstanceOf [<<Phi>>,<<Class>>]
- /// CHECK-START: void Main.testKeepInstanceOf(java.lang.Object, boolean) dead_code_elimination (after)
+ /// CHECK-START: void Main.testKeepInstanceOf(java.lang.Object, boolean) dead_code_elimination$initial (after)
/// CHECK: <<Phi:l\d+>> Phi
/// CHECK: <<Class:l\d+>> LoadClass
/// CHECK: InstanceOf [<<Phi>>,<<Class>>]
diff --git a/test/542-unresolved-access-check/src/Main.java b/test/542-unresolved-access-check/src/Main.java
index 2bdf47f..62bfea1 100644
--- a/test/542-unresolved-access-check/src/Main.java
+++ b/test/542-unresolved-access-check/src/Main.java
@@ -58,7 +58,7 @@
"loadClassBinaryName", String.class, ClassLoader.class, List.class);
if (dex != null) {
- Class clazz = (Class)method.invoke(dex, className, this, null);
+ Class<?> clazz = (Class<?>)method.invoke(dex, className, this, null);
if (clazz != null) {
return clazz;
}
@@ -72,7 +72,7 @@
public class Main {
public static void main(String[] args) throws Exception {
MyClassLoader o = new MyClassLoader();
- Class foo = o.loadClass("LoadedByMyClassLoader");
+ Class<?> foo = o.loadClass("LoadedByMyClassLoader");
Method m = foo.getDeclaredMethod("main");
m.invoke(null);
}
diff --git a/test/543-checker-dce-trycatch/smali/TestCase.smali b/test/543-checker-dce-trycatch/smali/TestCase.smali
index 9f9916d..5557c7b 100644
--- a/test/543-checker-dce-trycatch/smali/TestCase.smali
+++ b/test/543-checker-dce-trycatch/smali/TestCase.smali
@@ -26,18 +26,18 @@
# Test a case when one entering TryBoundary is dead but the rest of the try
# block remains live.
-## CHECK-START: int TestCase.testDeadEntry(int, int, int, int) dead_code_elimination_final (before)
+## CHECK-START: int TestCase.testDeadEntry(int, int, int, int) dead_code_elimination$final (before)
## CHECK: Add
-## CHECK-START: int TestCase.testDeadEntry(int, int, int, int) dead_code_elimination_final (before)
+## CHECK-START: int TestCase.testDeadEntry(int, int, int, int) dead_code_elimination$final (before)
## CHECK: TryBoundary kind:entry
## CHECK: TryBoundary kind:entry
## CHECK-NOT: TryBoundary kind:entry
-## CHECK-START: int TestCase.testDeadEntry(int, int, int, int) dead_code_elimination_final (after)
+## CHECK-START: int TestCase.testDeadEntry(int, int, int, int) dead_code_elimination$final (after)
## CHECK-NOT: Add
-## CHECK-START: int TestCase.testDeadEntry(int, int, int, int) dead_code_elimination_final (after)
+## CHECK-START: int TestCase.testDeadEntry(int, int, int, int) dead_code_elimination$final (after)
## CHECK: TryBoundary kind:entry
## CHECK-NOT: TryBoundary kind:entry
@@ -71,18 +71,18 @@
# Test a case when one exiting TryBoundary is dead but the rest of the try
# block remains live.
-## CHECK-START: int TestCase.testDeadExit(int, int, int, int) dead_code_elimination_final (before)
+## CHECK-START: int TestCase.testDeadExit(int, int, int, int) dead_code_elimination$final (before)
## CHECK: Add
-## CHECK-START: int TestCase.testDeadExit(int, int, int, int) dead_code_elimination_final (before)
+## CHECK-START: int TestCase.testDeadExit(int, int, int, int) dead_code_elimination$final (before)
## CHECK: TryBoundary kind:exit
## CHECK: TryBoundary kind:exit
## CHECK-NOT: TryBoundary kind:exit
-## CHECK-START: int TestCase.testDeadExit(int, int, int, int) dead_code_elimination_final (after)
+## CHECK-START: int TestCase.testDeadExit(int, int, int, int) dead_code_elimination$final (after)
## CHECK-NOT: Add
-## CHECK-START: int TestCase.testDeadExit(int, int, int, int) dead_code_elimination_final (after)
+## CHECK-START: int TestCase.testDeadExit(int, int, int, int) dead_code_elimination$final (after)
## CHECK: TryBoundary kind:exit
## CHECK-NOT: TryBoundary kind:exit
@@ -117,21 +117,21 @@
# Test that a catch block remains live and consistent if some of try blocks
# throwing into it are removed.
-## CHECK-START: int TestCase.testOneTryBlockDead(int, int, int, int) dead_code_elimination_final (before)
+## CHECK-START: int TestCase.testOneTryBlockDead(int, int, int, int) dead_code_elimination$final (before)
## CHECK: TryBoundary kind:entry
## CHECK: TryBoundary kind:entry
## CHECK-NOT: TryBoundary kind:entry
-## CHECK-START: int TestCase.testOneTryBlockDead(int, int, int, int) dead_code_elimination_final (before)
+## CHECK-START: int TestCase.testOneTryBlockDead(int, int, int, int) dead_code_elimination$final (before)
## CHECK: TryBoundary kind:exit
## CHECK: TryBoundary kind:exit
## CHECK-NOT: TryBoundary kind:exit
-## CHECK-START: int TestCase.testOneTryBlockDead(int, int, int, int) dead_code_elimination_final (after)
+## CHECK-START: int TestCase.testOneTryBlockDead(int, int, int, int) dead_code_elimination$final (after)
## CHECK: TryBoundary kind:entry
## CHECK-NOT: TryBoundary kind:entry
-## CHECK-START: int TestCase.testOneTryBlockDead(int, int, int, int) dead_code_elimination_final (after)
+## CHECK-START: int TestCase.testOneTryBlockDead(int, int, int, int) dead_code_elimination$final (after)
## CHECK: TryBoundary kind:exit
## CHECK-NOT: TryBoundary kind:exit
@@ -203,7 +203,7 @@
# Test that DCE removes catch phi uses of instructions defined in dead try blocks.
-## CHECK-START: int TestCase.testCatchPhiInputs_DefinedInTryBlock(int, int, int, int) dead_code_elimination_final (before)
+## CHECK-START: int TestCase.testCatchPhiInputs_DefinedInTryBlock(int, int, int, int) dead_code_elimination$final (before)
## CHECK-DAG: <<Arg0:i\d+>> ParameterValue
## CHECK-DAG: <<Arg1:i\d+>> ParameterValue
## CHECK-DAG: <<Const0xa:i\d+>> IntConstant 10
@@ -220,7 +220,7 @@
## CHECK-DAG: Phi [<<Add>>,<<Const0xc>>,<<Const0xe>>] reg:2 is_catch_phi:true
## CHECK-DAG: Phi [<<Select>>,<<Const0x10>>,<<Const0x11>>] reg:3 is_catch_phi:true
-## CHECK-START: int TestCase.testCatchPhiInputs_DefinedInTryBlock(int, int, int, int) dead_code_elimination_final (after)
+## CHECK-START: int TestCase.testCatchPhiInputs_DefinedInTryBlock(int, int, int, int) dead_code_elimination$final (after)
## CHECK-DAG: <<Const0xb:i\d+>> IntConstant 11
## CHECK-DAG: <<Const0xc:i\d+>> IntConstant 12
## CHECK-DAG: <<Const0xd:i\d+>> IntConstant 13
@@ -277,7 +277,7 @@
# Test that DCE does not remove catch phi uses of instructions defined outside
# dead try blocks.
-## CHECK-START: int TestCase.testCatchPhiInputs_DefinedOutsideTryBlock(int, int, int, int) dead_code_elimination_final (before)
+## CHECK-START: int TestCase.testCatchPhiInputs_DefinedOutsideTryBlock(int, int, int, int) dead_code_elimination$final (before)
## CHECK-DAG: <<Const0xa:i\d+>> IntConstant 10
## CHECK-DAG: <<Const0xb:i\d+>> IntConstant 11
## CHECK-DAG: <<Const0xc:i\d+>> IntConstant 12
@@ -287,7 +287,7 @@
## CHECK-DAG: Phi [<<Const0xa>>,<<Const0xb>>,<<Const0xd>>] reg:1 is_catch_phi:true
## CHECK-DAG: Phi [<<Const0xf>>,<<Const0xc>>,<<Const0xe>>] reg:2 is_catch_phi:true
-## CHECK-START: int TestCase.testCatchPhiInputs_DefinedOutsideTryBlock(int, int, int, int) dead_code_elimination_final (after)
+## CHECK-START: int TestCase.testCatchPhiInputs_DefinedOutsideTryBlock(int, int, int, int) dead_code_elimination$final (after)
## CHECK-DAG: <<Const0xa:i\d+>> IntConstant 10
## CHECK-DAG: <<Const0xb:i\d+>> IntConstant 11
## CHECK-DAG: <<Const0xc:i\d+>> IntConstant 12
diff --git a/test/543-checker-dce-trycatch/src/Main.java b/test/543-checker-dce-trycatch/src/Main.java
index 6e73d0d..19587e7 100644
--- a/test/543-checker-dce-trycatch/src/Main.java
+++ b/test/543-checker-dce-trycatch/src/Main.java
@@ -35,10 +35,10 @@
// where TryBoundary still has exception handler successors after having removed
// some already.
- /// CHECK-START: void Main.testDeadTryCatch(boolean) dead_code_elimination_final (after)
+ /// CHECK-START: void Main.testDeadTryCatch(boolean) dead_code_elimination$final (after)
/// CHECK-NOT: TryBoundary
- /// CHECK-START: void Main.testDeadTryCatch(boolean) dead_code_elimination_final (after)
+ /// CHECK-START: void Main.testDeadTryCatch(boolean) dead_code_elimination$final (after)
/// CHECK: begin_block
/// CHECK: begin_block
/// CHECK: begin_block
diff --git a/test/545-tracing-and-jit/src/Main.java b/test/545-tracing-and-jit/src/Main.java
index a2d51d5..f365c6e 100644
--- a/test/545-tracing-and-jit/src/Main.java
+++ b/test/545-tracing-and-jit/src/Main.java
@@ -226,7 +226,7 @@
private static final Method getMethodTracingModeMethod;
static {
try {
- Class c = Class.forName("dalvik.system.VMDebug");
+ Class<?> c = Class.forName("dalvik.system.VMDebug");
startMethodTracingMethod = c.getDeclaredMethod("startMethodTracing", String.class,
Integer.TYPE, Integer.TYPE, Boolean.TYPE, Integer.TYPE);
stopMethodTracingMethod = c.getDeclaredMethod("stopMethodTracing");
diff --git a/test/552-checker-primitive-typeprop/src/Main.java b/test/552-checker-primitive-typeprop/src/Main.java
index fe2343e..1296800 100644
--- a/test/552-checker-primitive-typeprop/src/Main.java
+++ b/test/552-checker-primitive-typeprop/src/Main.java
@@ -29,15 +29,15 @@
public static void main(String[] args) throws Exception {
Class<?> c = Class.forName("SsaBuilder");
- Method m = c.getMethod("environmentPhi", new Class[] { boolean.class, int[].class });
+ Method m = c.getMethod("environmentPhi", boolean.class, int[].class);
int[] array = new int[3];
int result;
- result = (Integer) m.invoke(null, new Object[] { true, array } );
+ result = (Integer) m.invoke(null, true, array);
assertEquals(2, result);
- result = (Integer) m.invoke(null, new Object[] { false, array } );
+ result = (Integer) m.invoke(null, false, array);
assertEquals(0, result);
}
}
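[Editor's note: m.invoke(null, true, array) above leans on Object... varargs: the boolean is autoboxed and both arguments are packed into one Object[]. The method below is a hypothetical stand-in mirroring the smali test's signature; the caveat about a lone array argument is standard reflection behavior:

    import java.lang.reflect.Method;

    public class InvokeVarargsDemo {
      public static int environmentPhi(boolean b, int[] a) {  // hypothetical stand-in
        return b ? 2 : 0;
      }

      public static void main(String[] args) throws Exception {
        Method m = InvokeVarargsDemo.class.getMethod(
            "environmentPhi", boolean.class, int[].class);
        System.out.println(m.invoke(null, true, new int[3]));  // 2
        // Caveat: with a single array parameter, cast to Object so the array is
        // passed as one argument rather than unpacked as the varargs array:
        //   m2.invoke(null, (Object) someArray);
      }
    }
]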
diff --git a/test/557-checker-instruction-simplifier-ror/src/Main.java b/test/557-checker-instruction-simplifier-ror/src/Main.java
index 6d8b74d..0e3d145 100644
--- a/test/557-checker-instruction-simplifier-ror/src/Main.java
+++ b/test/557-checker-instruction-simplifier-ror/src/Main.java
@@ -175,7 +175,7 @@
// (i >>> #distance) | (i << #-distance)
- /// CHECK-START: int Main.ror_int_constant_c_negc(int) instruction_simplifier_after_bce (before)
+ /// CHECK-START: int Main.ror_int_constant_c_negc(int) instruction_simplifier$after_bce (before)
/// CHECK: <<ArgValue:i\d+>> ParameterValue
/// CHECK: <<Const2:i\d+>> IntConstant 2
/// CHECK: <<ConstNeg2:i\d+>> IntConstant -2
@@ -184,13 +184,13 @@
/// CHECK: <<Or:i\d+>> Or [<<UShr>>,<<Shl>>]
/// CHECK: Return [<<Or>>]
- /// CHECK-START: int Main.ror_int_constant_c_negc(int) instruction_simplifier_after_bce (after)
+ /// CHECK-START: int Main.ror_int_constant_c_negc(int) instruction_simplifier$after_bce (after)
/// CHECK: <<ArgValue:i\d+>> ParameterValue
/// CHECK: <<Const2:i\d+>> IntConstant 2
/// CHECK: <<Ror:i\d+>> Ror [<<ArgValue>>,<<Const2>>]
/// CHECK: Return [<<Ror>>]
- /// CHECK-START: int Main.ror_int_constant_c_negc(int) instruction_simplifier_after_bce (after)
+ /// CHECK-START: int Main.ror_int_constant_c_negc(int) instruction_simplifier$after_bce (after)
/// CHECK-NOT: UShr
/// CHECK-NOT: Shl
public static int ror_int_constant_c_negc(int value) {
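[Editor's note: the pattern named in the comment above, (i >>> #distance) | (i << #-distance), is a rotation because Java masks int shift distances to their low five bits, so a shift by -2 acts as a shift by 30. A minimal sketch of the identity the simplifier exploits:

    public class RorDemo {
      static int rorHandwritten(int value) {
        return (value >>> 2) | (value << -2);  // -2 is masked to 30: rotate right by 2
      }

      public static void main(String[] args) {
        int v = 0x12345678;
        System.out.println(rorHandwritten(v) == Integer.rotateRight(v, 2));  // true
      }
    }
]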
diff --git a/test/559-checker-irreducible-loop/smali/IrreducibleLoop.smali b/test/559-checker-irreducible-loop/smali/IrreducibleLoop.smali
index 7ce60a3..5d4aa56 100644
--- a/test/559-checker-irreducible-loop/smali/IrreducibleLoop.smali
+++ b/test/559-checker-irreducible-loop/smali/IrreducibleLoop.smali
@@ -28,7 +28,7 @@
# exit \- \
# other_loop_entry
#
-## CHECK-START: int IrreducibleLoop.simpleLoop(int) dead_code_elimination (before)
+## CHECK-START: int IrreducibleLoop.simpleLoop(int) dead_code_elimination$initial (before)
## CHECK: irreducible:true
.method public static simpleLoop(I)I
.registers 2
@@ -65,7 +65,7 @@
# other_loop_entry
# set 30 in p1:myField
#
-## CHECK-START: int IrreducibleLoop.lse(int, Main) dead_code_elimination (after)
+## CHECK-START: int IrreducibleLoop.lse(int, Main) dead_code_elimination$initial (after)
## CHECK: irreducible:true
#
## CHECK-START: int IrreducibleLoop.lse(int, Main) load_store_elimination (after)
@@ -101,10 +101,10 @@
# exit \- \
# other_loop_entry
#
-## CHECK-START: int IrreducibleLoop.dce(int) dead_code_elimination (before)
+## CHECK-START: int IrreducibleLoop.dce(int) dead_code_elimination$initial (before)
## CHECK: irreducible:true
-## CHECK-START: int IrreducibleLoop.dce(int) dead_code_elimination (after)
+## CHECK-START: int IrreducibleLoop.dce(int) dead_code_elimination$initial (after)
## CHECK: irreducible:true
.method public static dce(I)I
.registers 3
diff --git a/test/564-checker-irreducible-loop/smali/IrreducibleLoop.smali b/test/564-checker-irreducible-loop/smali/IrreducibleLoop.smali
index b82ed92..75344f7 100644
--- a/test/564-checker-irreducible-loop/smali/IrreducibleLoop.smali
+++ b/test/564-checker-irreducible-loop/smali/IrreducibleLoop.smali
@@ -16,7 +16,7 @@
.super Ljava/lang/Object;
-## CHECK-START-X86: int IrreducibleLoop.simpleLoop(int) dead_code_elimination (before)
+## CHECK-START-X86: int IrreducibleLoop.simpleLoop(int) dead_code_elimination$initial (before)
## CHECK-DAG: <<Method:(i|j)\d+>> CurrentMethod
## CHECK-DAG: <<Constant:i\d+>> IntConstant 42
## CHECK-DAG: InvokeStaticOrDirect [<<Constant>>,<<Method>>] loop:{{B\d+}} irreducible:true
diff --git a/test/565-checker-doublenegbitwise/src/Main.java b/test/565-checker-doublenegbitwise/src/Main.java
index e426b75..811c280 100644
--- a/test/565-checker-doublenegbitwise/src/Main.java
+++ b/test/565-checker-doublenegbitwise/src/Main.java
@@ -70,7 +70,7 @@
* same pass.
*/
- /// CHECK-START: boolean Main.$opt$noinline$booleanAndToOr(boolean, boolean) instruction_simplifier_after_bce (before)
+ /// CHECK-START: boolean Main.$opt$noinline$booleanAndToOr(boolean, boolean) instruction_simplifier$after_bce (before)
/// CHECK: <<P1:z\d+>> ParameterValue
/// CHECK: <<P2:z\d+>> ParameterValue
/// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
@@ -80,18 +80,18 @@
/// CHECK: <<And:i\d+>> And [<<Select2>>,<<Select1>>]
/// CHECK: Return [<<And>>]
- /// CHECK-START: boolean Main.$opt$noinline$booleanAndToOr(boolean, boolean) instruction_simplifier_after_bce (after)
+ /// CHECK-START: boolean Main.$opt$noinline$booleanAndToOr(boolean, boolean) instruction_simplifier$after_bce (after)
/// CHECK: <<Cond1:z\d+>> ParameterValue
/// CHECK: <<Cond2:z\d+>> ParameterValue
/// CHECK: <<Or:i\d+>> Or [<<Cond2>>,<<Cond1>>]
/// CHECK: <<BooleanNot:z\d+>> BooleanNot [<<Or>>]
/// CHECK: Return [<<BooleanNot>>]
- /// CHECK-START: boolean Main.$opt$noinline$booleanAndToOr(boolean, boolean) instruction_simplifier_after_bce (after)
+ /// CHECK-START: boolean Main.$opt$noinline$booleanAndToOr(boolean, boolean) instruction_simplifier$after_bce (after)
/// CHECK: BooleanNot
/// CHECK-NOT: BooleanNot
- /// CHECK-START: boolean Main.$opt$noinline$booleanAndToOr(boolean, boolean) instruction_simplifier_after_bce (after)
+ /// CHECK-START: boolean Main.$opt$noinline$booleanAndToOr(boolean, boolean) instruction_simplifier$after_bce (after)
/// CHECK-NOT: And
public static boolean $opt$noinline$booleanAndToOr(boolean a, boolean b) {
@@ -138,7 +138,7 @@
* same pass.
*/
- /// CHECK-START: boolean Main.$opt$noinline$booleanOrToAnd(boolean, boolean) instruction_simplifier_after_bce (before)
+ /// CHECK-START: boolean Main.$opt$noinline$booleanOrToAnd(boolean, boolean) instruction_simplifier$after_bce (before)
/// CHECK: <<P1:z\d+>> ParameterValue
/// CHECK: <<P2:z\d+>> ParameterValue
/// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
@@ -148,18 +148,18 @@
/// CHECK: <<Or:i\d+>> Or [<<Select2>>,<<Select1>>]
/// CHECK: Return [<<Or>>]
- /// CHECK-START: boolean Main.$opt$noinline$booleanOrToAnd(boolean, boolean) instruction_simplifier_after_bce (after)
+ /// CHECK-START: boolean Main.$opt$noinline$booleanOrToAnd(boolean, boolean) instruction_simplifier$after_bce (after)
/// CHECK: <<Cond1:z\d+>> ParameterValue
/// CHECK: <<Cond2:z\d+>> ParameterValue
/// CHECK: <<And:i\d+>> And [<<Cond2>>,<<Cond1>>]
/// CHECK: <<BooleanNot:z\d+>> BooleanNot [<<And>>]
/// CHECK: Return [<<BooleanNot>>]
- /// CHECK-START: boolean Main.$opt$noinline$booleanOrToAnd(boolean, boolean) instruction_simplifier_after_bce (after)
+ /// CHECK-START: boolean Main.$opt$noinline$booleanOrToAnd(boolean, boolean) instruction_simplifier$after_bce (after)
/// CHECK: BooleanNot
/// CHECK-NOT: BooleanNot
- /// CHECK-START: boolean Main.$opt$noinline$booleanOrToAnd(boolean, boolean) instruction_simplifier_after_bce (after)
+ /// CHECK-START: boolean Main.$opt$noinline$booleanOrToAnd(boolean, boolean) instruction_simplifier$after_bce (after)
/// CHECK-NOT: Or
public static boolean $opt$noinline$booleanOrToAnd(boolean a, boolean b) {
@@ -246,7 +246,7 @@
* same pass.
*/
- /// CHECK-START: boolean Main.$opt$noinline$booleanNotXorToXor(boolean, boolean) instruction_simplifier_after_bce (before)
+ /// CHECK-START: boolean Main.$opt$noinline$booleanNotXorToXor(boolean, boolean) instruction_simplifier$after_bce (before)
/// CHECK: <<P1:z\d+>> ParameterValue
/// CHECK: <<P2:z\d+>> ParameterValue
/// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
@@ -256,13 +256,13 @@
/// CHECK: <<Xor:i\d+>> Xor [<<Select2>>,<<Select1>>]
/// CHECK: Return [<<Xor>>]
- /// CHECK-START: boolean Main.$opt$noinline$booleanNotXorToXor(boolean, boolean) instruction_simplifier_after_bce (after)
+ /// CHECK-START: boolean Main.$opt$noinline$booleanNotXorToXor(boolean, boolean) instruction_simplifier$after_bce (after)
/// CHECK: <<Cond1:z\d+>> ParameterValue
/// CHECK: <<Cond2:z\d+>> ParameterValue
/// CHECK: <<Xor:i\d+>> Xor [<<Cond2>>,<<Cond1>>]
/// CHECK: Return [<<Xor>>]
- /// CHECK-START: boolean Main.$opt$noinline$booleanNotXorToXor(boolean, boolean) instruction_simplifier_after_bce (after)
+ /// CHECK-START: boolean Main.$opt$noinline$booleanNotXorToXor(boolean, boolean) instruction_simplifier$after_bce (after)
/// CHECK-NOT: BooleanNot
public static boolean $opt$noinline$booleanNotXorToXor(boolean a, boolean b) {
diff --git a/test/565-checker-rotate/src/Main.java b/test/565-checker-rotate/src/Main.java
index aadb597..eb0e868 100644
--- a/test/565-checker-rotate/src/Main.java
+++ b/test/565-checker-rotate/src/Main.java
@@ -52,14 +52,14 @@
/// CHECK-START: int Main.rotateLeftBoolean(boolean, int) select_generator (after)
/// CHECK-NOT: Phi
- /// CHECK-START: int Main.rotateLeftBoolean(boolean, int) instruction_simplifier_after_bce (after)
+ /// CHECK-START: int Main.rotateLeftBoolean(boolean, int) instruction_simplifier$after_bce (after)
/// CHECK: <<ArgVal:z\d+>> ParameterValue
/// CHECK: <<ArgDist:i\d+>> ParameterValue
/// CHECK-DAG: <<NegDist:i\d+>> Neg [<<ArgDist>>]
/// CHECK-DAG: <<Result:i\d+>> Ror [<<ArgVal>>,<<NegDist>>]
/// CHECK-DAG: Return [<<Result>>]
- /// CHECK-START: int Main.rotateLeftBoolean(boolean, int) instruction_simplifier_after_bce (after)
+ /// CHECK-START: int Main.rotateLeftBoolean(boolean, int) instruction_simplifier$after_bce (after)
/// CHECK-NOT: Select
private static int rotateLeftBoolean(boolean value, int distance) {
@@ -206,13 +206,13 @@
/// CHECK-START: int Main.rotateRightBoolean(boolean, int) select_generator (after)
/// CHECK-NOT: Phi
- /// CHECK-START: int Main.rotateRightBoolean(boolean, int) instruction_simplifier_after_bce (after)
+ /// CHECK-START: int Main.rotateRightBoolean(boolean, int) instruction_simplifier$after_bce (after)
/// CHECK: <<ArgVal:z\d+>> ParameterValue
/// CHECK: <<ArgDist:i\d+>> ParameterValue
/// CHECK-DAG: <<Result:i\d+>> Ror [<<ArgVal>>,<<ArgDist>>]
/// CHECK-DAG: Return [<<Result>>]
- /// CHECK-START: int Main.rotateRightBoolean(boolean, int) instruction_simplifier_after_bce (after)
+ /// CHECK-START: int Main.rotateRightBoolean(boolean, int) instruction_simplifier$after_bce (after)
/// CHECK-NOT: Select
private static int rotateRightBoolean(boolean value, int distance) {
diff --git a/test/566-checker-signum/src/Main.java b/test/566-checker-signum/src/Main.java
index 5f2cf3d..7fc9e84 100644
--- a/test/566-checker-signum/src/Main.java
+++ b/test/566-checker-signum/src/Main.java
@@ -45,13 +45,13 @@
/// CHECK-START: int Main.signBoolean(boolean) select_generator (after)
/// CHECK-NOT: Phi
- /// CHECK-START: int Main.signBoolean(boolean) instruction_simplifier_after_bce (after)
+ /// CHECK-START: int Main.signBoolean(boolean) instruction_simplifier$after_bce (after)
/// CHECK-DAG: <<Arg:z\d+>> ParameterValue
/// CHECK-DAG: <<Zero:i\d+>> IntConstant 0
/// CHECK-DAG: <<Result:i\d+>> Compare [<<Arg>>,<<Zero>>]
/// CHECK-DAG: Return [<<Result>>]
- /// CHECK-START: int Main.signBoolean(boolean) instruction_simplifier_after_bce (after)
+ /// CHECK-START: int Main.signBoolean(boolean) instruction_simplifier$after_bce (after)
/// CHECK-NOT: Select
private static int signBoolean(boolean x) {
diff --git a/test/566-polymorphic-inlining/src/Main.java b/test/566-polymorphic-inlining/src/Main.java
index 53852a4..793b85f 100644
--- a/test/566-polymorphic-inlining/src/Main.java
+++ b/test/566-polymorphic-inlining/src/Main.java
@@ -15,9 +15,9 @@
*/
interface Itf {
- public Class sameInvokeInterface();
- public Class sameInvokeInterface2();
- public Class sameInvokeInterface3();
+ public Class<?> sameInvokeInterface();
+ public Class<?> sameInvokeInterface2();
+ public Class<?> sameInvokeInterface3();
}
public class Main implements Itf {
@@ -81,31 +81,31 @@
assertEquals(20001, counter);
}
- public Class sameInvokeVirtual() {
+ public Class<?> sameInvokeVirtual() {
field.getClass(); // null check to ensure we get an inlined frame in the CodeInfo.
return Main.class;
}
- public Class sameInvokeInterface() {
+ public Class<?> sameInvokeInterface() {
field.getClass(); // null check to ensure we get an inlined frame in the CodeInfo.
return Itf.class;
}
- public Class sameInvokeInterface2() {
+ public Class<?> sameInvokeInterface2() {
field.getClass(); // null check to ensure we get an inlined frame in the CodeInfo.
return Itf.class;
}
- public Class sameInvokeInterface3() {
+ public Class<?> sameInvokeInterface3() {
field.getClass(); // null check to ensure we get an inlined frame in the CodeInfo.
return Itf.class;
}
- public static Class testInvokeInterface(Itf i) {
+ public static Class<?> testInvokeInterface(Itf i) {
return i.sameInvokeInterface();
}
- public static Class testInvokeInterface2(Itf i) {
+ public static Class<?> testInvokeInterface2(Itf i) {
// Make three interface calls that will do a ClassTableGet to ensure bogus code
// generation of ClassTableGet will crash.
i.sameInvokeInterface();
@@ -113,7 +113,7 @@
return i.sameInvokeInterface3();
}
- public static Class testInvokeVirtual(Main m) {
+ public static Class<?> testInvokeVirtual(Main m) {
return m.sameInvokeVirtual();
}
@@ -139,18 +139,18 @@
}
class OtherSubclass extends Main {
- public Class sameInvokeVirtual() {
+ public Class<?> sameInvokeVirtual() {
return OtherSubclass.class;
}
- public Class sameInvokeInterface() {
+ public Class<?> sameInvokeInterface() {
return OtherSubclass.class;
}
- public Class sameInvokeInterface2() {
+ public Class<?> sameInvokeInterface2() {
return null;
}
- public Class sameInvokeInterface3() {
+ public Class<?> sameInvokeInterface3() {
return null;
}
}
diff --git a/test/567-checker-compare/src/Main.java b/test/567-checker-compare/src/Main.java
index 8587950..a05bb60 100644
--- a/test/567-checker-compare/src/Main.java
+++ b/test/567-checker-compare/src/Main.java
@@ -75,13 +75,13 @@
/// CHECK-START: int Main.compareBooleans(boolean, boolean) select_generator (after)
/// CHECK-NOT: Phi
- /// CHECK-START: int Main.compareBooleans(boolean, boolean) instruction_simplifier_after_bce (after)
+ /// CHECK-START: int Main.compareBooleans(boolean, boolean) instruction_simplifier$after_bce (after)
/// CHECK: <<ArgX:z\d+>> ParameterValue
/// CHECK: <<ArgY:z\d+>> ParameterValue
/// CHECK-DAG: <<Result:i\d+>> Compare [<<ArgX>>,<<ArgY>>]
/// CHECK-DAG: Return [<<Result>>]
- /// CHECK-START: int Main.compareBooleans(boolean, boolean) instruction_simplifier_after_bce (after)
+ /// CHECK-START: int Main.compareBooleans(boolean, boolean) instruction_simplifier$after_bce (after)
/// CHECK-NOT: Select
private static int compareBooleans(boolean x, boolean y) {
diff --git a/test/570-checker-osr/smali/Osr.smali b/test/570-checker-osr/smali/Osr.smali
index 869c7c3..6592b7b 100644
--- a/test/570-checker-osr/smali/Osr.smali
+++ b/test/570-checker-osr/smali/Osr.smali
@@ -19,7 +19,7 @@
# Check that blocks having only nops are not merged when they are loop headers.
# This ensures we can do on-stack replacement for branches to those nop blocks.
-## CHECK-START: int Osr.simpleLoop(int, int) dead_code_elimination_final (after)
+## CHECK-START: int Osr.simpleLoop(int, int) dead_code_elimination$final (after)
## CHECK-DAG: SuspendCheck loop:<<OuterLoop:B\d+>> outer_loop:none
## CHECK-DAG: SuspendCheck loop:{{B\d+}} outer_loop:<<OuterLoop>>
.method public static simpleLoop(II)I
diff --git a/test/570-checker-osr/src/Main.java b/test/570-checker-osr/src/Main.java
index 15c232d..8af3894 100644
--- a/test/570-checker-osr/src/Main.java
+++ b/test/570-checker-osr/src/Main.java
@@ -129,7 +129,7 @@
DeoptimizationController.startDeoptimization();
}
- public static Class $noinline$inlineCache(Main m, boolean isSecondInvocation) {
+ public static Class<?> $noinline$inlineCache(Main m, boolean isSecondInvocation) {
// If we are running in non-JIT mode, or were unlucky enough to get this method
// already JITted, just return the expected value.
if (!isInInterpreter("$noinline$inlineCache")) {
@@ -159,7 +159,7 @@
return other.returnClass();
}
- public static Class $noinline$inlineCache2(Main m, boolean isSecondInvocation) {
+ public static Class<?> $noinline$inlineCache2(Main m, boolean isSecondInvocation) {
// If we are running in non-JIT mode, or were unlucky enough to get this method
// already JITted, just return the expected value.
if (!isInInterpreter("$noinline$inlineCache2")) {
@@ -188,7 +188,7 @@
return (other == null) ? null : other.returnClass();
}
- public static Class $noinline$inlineCache3(Main m, boolean isSecondInvocation) {
+ public static Class<?> $noinline$inlineCache3(Main m, boolean isSecondInvocation) {
// If we are running in non-JIT mode, or were unlucky enough to get this method
// already JITted, just return the expected value.
if (!isInInterpreter("$noinline$inlineCache3")) {
@@ -229,7 +229,7 @@
return null;
}
- public Class returnClass() {
+ public Class<?> returnClass() {
return Main.class;
}
@@ -305,7 +305,7 @@
}
class SubMain extends Main {
- public Class returnClass() {
+ public Class<?> returnClass() {
return SubMain.class;
}
diff --git a/test/576-polymorphic-inlining/src/Main.java b/test/576-polymorphic-inlining/src/Main.java
index d8d09af..5763d89 100644
--- a/test/576-polymorphic-inlining/src/Main.java
+++ b/test/576-polymorphic-inlining/src/Main.java
@@ -65,11 +65,11 @@
public void willOnlyInlineForMainVoid() {
}
- public Class willInlineWithReturnValue() {
+ public Class<?> willInlineWithReturnValue() {
return Main.class;
}
- public Class willOnlyInlineForMainWithReturnValue() {
+ public Class<?> willOnlyInlineForMainWithReturnValue() {
return Main.class;
}
public static boolean doThrow;
@@ -83,21 +83,21 @@
public void willInlineVoid() {
}
- public Class willInlineWithReturnValue() {
+ public Class<?> willInlineWithReturnValue() {
return SubMain.class;
}
- public Class willOnlyInlineForMainWithReturnValue() {
+ public Class<?> willOnlyInlineForMainWithReturnValue() {
return SubMain.class;
}
}
class SubSubMain extends SubMain {
- public Class willInlineWithReturnValue() {
+ public Class<?> willInlineWithReturnValue() {
return SubSubMain.class;
}
- public Class willOnlyInlineForMainWithReturnValue() {
+ public Class<?> willOnlyInlineForMainWithReturnValue() {
return SubSubMain.class;
}
}
diff --git a/test/577-profile-foreign-dex/src/Main.java b/test/577-profile-foreign-dex/src/Main.java
index 0cd85b5..ed7a625 100644
--- a/test/577-profile-foreign-dex/src/Main.java
+++ b/test/577-profile-foreign-dex/src/Main.java
@@ -111,11 +111,11 @@
}
private static void loadDexFile(String dexFile) throws Exception {
- Class pathClassLoader = Class.forName("dalvik.system.PathClassLoader");
+ Class<?> pathClassLoader = Class.forName("dalvik.system.PathClassLoader");
if (pathClassLoader == null) {
throw new RuntimeException("Couldn't find path class loader class");
}
- Constructor constructor =
+ Constructor<?> constructor =
pathClassLoader.getDeclaredConstructor(String.class, ClassLoader.class);
constructor.newInstance(
dexFile, ClassLoader.getSystemClassLoader());
@@ -125,7 +125,7 @@
private static final Method registerAppInfoMethod;
static {
try {
- Class c = Class.forName("dalvik.system.VMRuntime");
+ Class<?> c = Class.forName("dalvik.system.VMRuntime");
registerAppInfoMethod = c.getDeclaredMethod("registerAppInfo",
String.class, String.class, String[].class, String.class);
} catch (Exception e) {
diff --git a/test/580-checker-round/src/Main.java b/test/580-checker-round/src/Main.java
index 9e248ef..83bc55c 100644
--- a/test/580-checker-round/src/Main.java
+++ b/test/580-checker-round/src/Main.java
@@ -36,7 +36,8 @@
expectEquals32(-2, round32(-1.51f));
expectEquals32(-1, round32(-1.2f));
expectEquals32(-1, round32(-1.0f));
- expectEquals32(-1, round32(-0.51f));
+ expectEquals32(-1, round32(-0.5000001f));
+ expectEquals32(0, round32(-0.5f));
expectEquals32(0, round32(-0.2f));
expectEquals32(0, round32(-0.0f));
expectEquals32(0, round32(+0.0f));
@@ -47,11 +48,23 @@
expectEquals32(2, round32(+1.5f));
expectEquals32(2147483647, round32(Float.POSITIVE_INFINITY));
+ // Near minint.
+ expectEquals32(-2147483648, round32(Math.nextAfter(-2147483648.0f, Float.NEGATIVE_INFINITY)));
+ expectEquals32(-2147483648, round32(-2147483648.0f));
+ expectEquals32(-2147483520, round32(Math.nextAfter(-2147483648.0f, Float.POSITIVE_INFINITY)));
+
+ // Near maxint.
+ expectEquals32(2147483520, round32(Math.nextAfter(2147483648.0f, Float.NEGATIVE_INFINITY)));
+ expectEquals32(2147483647, round32(2147483648.0f));
+ expectEquals32(2147483647, round32(Math.nextAfter(2147483648.0f, Float.POSITIVE_INFINITY)));
+
// Some others.
for (int i = -100; i <= 100; ++i) {
expectEquals32(i - 1, round32((float) i - 0.51f));
+ expectEquals32(i, round32((float) i - 0.5f));
expectEquals32(i, round32((float) i));
expectEquals32(i + 1, round32((float) i + 0.5f));
+ expectEquals32(i + 1, round32((float) i + 0.51f));
}
for (float f = -1.5f; f <= -1.499f; f = Math.nextAfter(f, Float.POSITIVE_INFINITY)) {
expectEquals32(-1, round32(f));
@@ -61,8 +74,10 @@
float[] fvals = {
-16777215.5f,
-16777215.0f,
- -0.4999f,
- 0.4999f,
+ -0.49999998f,
+ -0.4999999701976776123046875f,
+ 0.4999999701976776123046875f,
+ 0.49999998f,
16777215.0f,
16777215.5f
};
@@ -71,6 +86,8 @@
-16777215,
0,
0,
+ 0,
+ 0,
16777215,
16777216
};
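The new round32 expectations pin down the exact semantics of `Math.round(float)`: ties round toward positive infinity (so `round32(-0.5f)` is 0, not -1), the largest float strictly below 0.5f rounds to 0, and float-to-int narrowing saturates at the int range boundaries. A sketch of those edge cases, assuming the Java 8 definition of `Math.round` (the pre-JDK-8010430 `floor(a + 0.5f)` recipe gets the near-half case wrong):

```java
public class Round32Edges {
  public static void main(String[] args) {
    // Ties round toward positive infinity.
    System.out.println(Math.round(-0.5f)); // 0
    System.out.println(Math.round(0.5f));  // 1
    // Largest float strictly below 0.5f still rounds to 0.
    System.out.println(Math.round(0.4999999701976776123046875f)); // 0
    // float -> int narrowing saturates, so 2^31 clamps to Integer.MAX_VALUE.
    System.out.println(Math.round(2147483648.0f)); // 2147483647
    System.out.println(Math.round(Math.nextAfter(2147483648.0f, Float.NEGATIVE_INFINITY))); // 2147483520
  }
}
```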
@@ -98,7 +115,8 @@
expectEquals64(-2L, round64(-1.51d));
expectEquals64(-1L, round64(-1.2d));
expectEquals64(-1L, round64(-1.0d));
- expectEquals64(-1L, round64(-0.51d));
+ expectEquals64(-1L, round64(-0.5000001d));
+ expectEquals64(0L, round64(-0.5d));
expectEquals64(0L, round64(-0.2d));
expectEquals64(0L, round64(-0.0d));
expectEquals64(0L, round64(+0.0d));
@@ -109,11 +127,27 @@
expectEquals64(2L, round64(+1.5d));
expectEquals64(9223372036854775807L, round64(Double.POSITIVE_INFINITY));
+ // Near minlong.
+ expectEquals64(-9223372036854775808L,
+ round64(Math.nextAfter(-9223372036854775808.0, Double.NEGATIVE_INFINITY)));
+ expectEquals64(-9223372036854775808L, round64(-9223372036854775808.0));
+ expectEquals64(-9223372036854774784L,
+ round64(Math.nextAfter(-9223372036854775808.0, Double.POSITIVE_INFINITY)));
+
+ // Near maxlong.
+ expectEquals64(9223372036854774784L,
+ round64(Math.nextAfter(9223372036854775808.0, Double.NEGATIVE_INFINITY)));
+ expectEquals64(9223372036854775807L, round64(9223372036854775808.0));
+ expectEquals64(9223372036854775807L,
+ round64(Math.nextAfter(9223372036854775808.0, Double.POSITIVE_INFINITY)));
+
// Some others.
for (long l = -100; l <= 100; ++l) {
expectEquals64(l - 1, round64((double) l - 0.51d));
+ expectEquals64(l, round64((double) l - 0.5d));
+ expectEquals64(l, round64((double) l));
expectEquals64(l + 1, round64((double) l + 0.5d));
- expectEquals64(l + 1, round64((double) l + 0.5d));
+ expectEquals64(l + 1, round64((double) l + 0.51d));
}
for (double d = -1.5d; d <= -1.49999999999d; d = Math.nextAfter(d, Double.POSITIVE_INFINITY)) {
expectEquals64(-1L, round64(d));
@@ -123,8 +157,10 @@
double[] dvals = {
-9007199254740991.5d,
-9007199254740991.0d,
+ -0.49999999999999997d,
-0.49999999999999994d,
0.49999999999999994d,
+ 0.49999999999999997d,
9007199254740991.0d,
9007199254740991.5d
};
@@ -133,6 +169,8 @@
-9007199254740991L,
0L,
0L,
+ 0L,
+ 0L,
9007199254740991L,
9007199254740992L
};
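The round64 additions probe the same boundaries for doubles, plus the pitfall that 0.49999999999999994d (the largest double strictly below 0.5) is aimed at: in the textbook `floor(x + 0.5)` recipe, the addition itself rounds to exactly 1.0, so a naive implementation returns 1 where `Math.round` must return 0. A sketch under Java 8 `Math.round` semantics:

```java
public class Round64Edges {
  public static void main(String[] args) {
    double belowHalf = 0.49999999999999994; // largest double strictly below 0.5
    System.out.println(Math.round(belowHalf));              // 0 (required)
    // The naive recipe goes wrong: belowHalf + 0.5 rounds up to exactly 1.0.
    System.out.println((long) Math.floor(belowHalf + 0.5)); // 1
    // double -> long narrowing saturates, matching the near-maxlong expectations.
    System.out.println(Math.round(9223372036854775808.0)); // 9223372036854775807
    System.out.println(Math.round(Math.nextAfter(9223372036854775808.0, Double.NEGATIVE_INFINITY))); // 9223372036854774784
  }
}
```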
diff --git a/test/588-checker-irreducible-lifetime-hole/smali/IrreducibleLoop.smali b/test/588-checker-irreducible-lifetime-hole/smali/IrreducibleLoop.smali
index 7dbd9da..186f0ab 100644
--- a/test/588-checker-irreducible-lifetime-hole/smali/IrreducibleLoop.smali
+++ b/test/588-checker-irreducible-lifetime-hole/smali/IrreducibleLoop.smali
@@ -16,7 +16,7 @@
.super Ljava/lang/Object;
-## CHECK-START-X86: int IrreducibleLoop.simpleLoop1(int) dead_code_elimination (before)
+## CHECK-START-X86: int IrreducibleLoop.simpleLoop1(int) dead_code_elimination$initial (before)
## CHECK-DAG: <<Method:(i|j)\d+>> CurrentMethod
## CHECK-DAG: <<Constant:i\d+>> IntConstant 42
## CHECK-DAG: Goto irreducible:true
@@ -57,7 +57,7 @@
return v0
.end method
-## CHECK-START-X86: int IrreducibleLoop.simpleLoop2(int) dead_code_elimination (before)
+## CHECK-START-X86: int IrreducibleLoop.simpleLoop2(int) dead_code_elimination$initial (before)
## CHECK-DAG: <<Method:(i|j)\d+>> CurrentMethod
## CHECK-DAG: <<Constant:i\d+>> IntConstant 42
## CHECK-DAG: Goto irreducible:true
diff --git a/test/591-checker-regression-dead-loop/src/Main.java b/test/591-checker-regression-dead-loop/src/Main.java
index 6d9fcf8..19856cf 100644
--- a/test/591-checker-regression-dead-loop/src/Main.java
+++ b/test/591-checker-regression-dead-loop/src/Main.java
@@ -17,7 +17,7 @@
class Main {
private static boolean $inline$false() { return false; }
- /// CHECK-START: void Main.main(java.lang.String[]) dead_code_elimination (before)
+ /// CHECK-START: void Main.main(java.lang.String[]) dead_code_elimination$initial (before)
/// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
/// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
/// CHECK-DAG: <<Phi:i\d+>> Phi [<<Const0>>,<<Add:i\d+>>] loop:{{B\d+}}
diff --git a/test/593-checker-boolean-to-integral-conv/src/Main.java b/test/593-checker-boolean-to-integral-conv/src/Main.java
index ba65839..b4c91c8 100644
--- a/test/593-checker-boolean-to-integral-conv/src/Main.java
+++ b/test/593-checker-boolean-to-integral-conv/src/Main.java
@@ -46,7 +46,7 @@
/// CHECK-DAG: <<IToS:b\d+>> TypeConversion [<<Sel>>]
/// CHECK-DAG: Return [<<IToS>>]
- /// CHECK-START: byte Main.booleanToByte(boolean) instruction_simplifier_after_bce (after)
+ /// CHECK-START: byte Main.booleanToByte(boolean) instruction_simplifier$after_bce (after)
/// CHECK: <<Arg:z\d+>> ParameterValue
/// CHECK-DAG: Return [<<Arg>>]
@@ -72,7 +72,7 @@
/// CHECK-DAG: <<IToS:s\d+>> TypeConversion [<<Sel>>]
/// CHECK-DAG: Return [<<IToS>>]
- /// CHECK-START: short Main.booleanToShort(boolean) instruction_simplifier_after_bce (after)
+ /// CHECK-START: short Main.booleanToShort(boolean) instruction_simplifier$after_bce (after)
/// CHECK: <<Arg:z\d+>> ParameterValue
/// CHECK-DAG: Return [<<Arg>>]
@@ -98,7 +98,7 @@
/// CHECK-DAG: <<IToC:c\d+>> TypeConversion [<<Sel>>]
/// CHECK-DAG: Return [<<IToC>>]
- /// CHECK-START: char Main.booleanToChar(boolean) instruction_simplifier_after_bce (after)
+ /// CHECK-START: char Main.booleanToChar(boolean) instruction_simplifier$after_bce (after)
/// CHECK: <<Arg:z\d+>> ParameterValue
/// CHECK-DAG: Return [<<Arg>>]
@@ -122,7 +122,7 @@
/// CHECK-DAG: <<Sel:i\d+>> Select [<<Zero>>,<<One>>,<<Arg>>]
/// CHECK-DAG: Return [<<Sel>>]
- /// CHECK-START: int Main.booleanToInt(boolean) instruction_simplifier_after_bce (after)
+ /// CHECK-START: int Main.booleanToInt(boolean) instruction_simplifier$after_bce (after)
/// CHECK: <<Arg:z\d+>> ParameterValue
/// CHECK-DAG: Return [<<Arg>>]
@@ -148,7 +148,7 @@
/// CHECK-DAG: <<IToJ:j\d+>> TypeConversion [<<Sel>>]
/// CHECK-DAG: Return [<<IToJ>>]
- /// CHECK-START: long Main.booleanToLong(boolean) instruction_simplifier_after_bce (after)
+ /// CHECK-START: long Main.booleanToLong(boolean) instruction_simplifier$after_bce (after)
/// CHECK: <<Arg:z\d+>> ParameterValue
/// CHECK-DAG: <<ZToJ:j\d+>> TypeConversion [<<Arg>>]
/// CHECK-DAG: Return [<<ZToJ>>]
@@ -185,7 +185,7 @@
/// CHECK-DAG: <<JToI:i\d+>> TypeConversion [<<IToJ>>]
/// CHECK-DAG: Return [<<JToI>>]
- /// CHECK-START: int Main.longToIntOfBoolean() instruction_simplifier_after_bce (after)
+ /// CHECK-START: int Main.longToIntOfBoolean() instruction_simplifier$after_bce (after)
/// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
/// CHECK-DAG: <<Sget:z\d+>> StaticFieldGet
/// CHECK-DAG: Return [<<Sget>>]
diff --git a/test/601-method-access/src/Main.java b/test/601-method-access/src/Main.java
index 838080a..9d9e568 100644
--- a/test/601-method-access/src/Main.java
+++ b/test/601-method-access/src/Main.java
@@ -22,7 +22,7 @@
public class Main {
public static void main(String[] args) {
try {
- Class c = Class.forName("SubClassUsingInaccessibleMethod");
+ Class<?> c = Class.forName("SubClassUsingInaccessibleMethod");
Object o = c.newInstance();
c.getMethod("test").invoke(o, null);
} catch (InvocationTargetException ite) {
diff --git a/test/604-hot-static-interface/src/Main.java b/test/604-hot-static-interface/src/Main.java
index 04d7cd6..a26623c 100644
--- a/test/604-hot-static-interface/src/Main.java
+++ b/test/604-hot-static-interface/src/Main.java
@@ -29,7 +29,7 @@
}
}
- private static native void ensureJitCompiled(Class itf, String method_name);
+ private static native void ensureJitCompiled(Class<?> itf, String method_name);
}
interface Itf {
diff --git a/test/605-new-string-from-bytes/src/Main.java b/test/605-new-string-from-bytes/src/Main.java
index 7dc0c15..5bd6c5d 100644
--- a/test/605-new-string-from-bytes/src/Main.java
+++ b/test/605-new-string-from-bytes/src/Main.java
@@ -20,7 +20,7 @@
public class Main {
public static void main(String[] args) throws Exception {
- Class c = Class.forName("java.lang.StringFactory");
+ Class<?> c = Class.forName("java.lang.StringFactory");
Method m = c.getDeclaredMethod("newStringFromBytes", byte[].class, int.class);
// Loop over allocations to get more chances of doing GC while in the
diff --git a/test/611-checker-simplify-if/src/Main.java b/test/611-checker-simplify-if/src/Main.java
index 21f4115..7dac007 100644
--- a/test/611-checker-simplify-if/src/Main.java
+++ b/test/611-checker-simplify-if/src/Main.java
@@ -35,14 +35,14 @@
// Test when a condition is the input of the if.
- /// CHECK-START: void Main.testNoInline(java.lang.String[]) dead_code_elimination (before)
+ /// CHECK-START: void Main.testNoInline(java.lang.String[]) dead_code_elimination$initial (before)
/// CHECK: <<Const0:i\d+>> IntConstant 0
/// CHECK: If
/// CHECK: <<Phi:i\d+>> Phi
/// CHECK: <<Equal:z\d+>> Equal [<<Phi>>,<<Const0>>]
/// CHECK: If [<<Equal>>]
- /// CHECK-START: void Main.testNoInline(java.lang.String[]) dead_code_elimination (after)
+ /// CHECK-START: void Main.testNoInline(java.lang.String[]) dead_code_elimination$initial (after)
/// CHECK: If
/// CHECK-NOT: Phi
/// CHECK-NOT: Equal
@@ -64,13 +64,13 @@
// Test when the phi is the input of the if.
- /// CHECK-START: void Main.testInline(java.lang.String[]) dead_code_elimination_final (before)
+ /// CHECK-START: void Main.testInline(java.lang.String[]) dead_code_elimination$final (before)
/// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
/// CHECK-DAG: If
/// CHECK-DAG: <<Phi:i\d+>> Phi
/// CHECK-DAG: If [<<Phi>>]
- /// CHECK-START: void Main.testInline(java.lang.String[]) dead_code_elimination_final (after)
+ /// CHECK-START: void Main.testInline(java.lang.String[]) dead_code_elimination$final (after)
/// CHECK: If
/// CHECK-NOT: Phi
/// CHECK-NOT: If
@@ -96,7 +96,7 @@
// Test when one input is not a constant. We can only optimize the constant input.
- /// CHECK-START: void Main.testNonConstantInputs(java.lang.String[]) dead_code_elimination (before)
+ /// CHECK-START: void Main.testNonConstantInputs(java.lang.String[]) dead_code_elimination$initial (before)
/// CHECK-DAG: <<Const34:i\d+>> IntConstant 34
/// CHECK-DAG: <<Const42:i\d+>> IntConstant 42
/// CHECK-DAG: If
@@ -105,7 +105,7 @@
/// CHECK-DAG: <<NotEqual:z\d+>> NotEqual [<<Phi>>,<<Const42>>]
/// CHECK-DAG: If [<<NotEqual>>]
- /// CHECK-START: void Main.testNonConstantInputs(java.lang.String[]) dead_code_elimination (after)
+ /// CHECK-START: void Main.testNonConstantInputs(java.lang.String[]) dead_code_elimination$initial (after)
/// CHECK-DAG: <<Const42:i\d+>> IntConstant 42
/// CHECK-DAG: If
/// CHECK-DAG: <<StaticFieldGet:i\d+>> StaticFieldGet
@@ -129,7 +129,7 @@
// Test with a condition.
- /// CHECK-START: void Main.testGreaterCondition(java.lang.String[]) dead_code_elimination (before)
+ /// CHECK-START: void Main.testGreaterCondition(java.lang.String[]) dead_code_elimination$initial (before)
/// CHECK-DAG: <<Const34:i\d+>> IntConstant 34
/// CHECK-DAG: <<Const22:i\d+>> IntConstant 22
/// CHECK-DAG: <<Const25:i\d+>> IntConstant 25
@@ -138,7 +138,7 @@
/// CHECK-DAG: <<GE:z\d+>> GreaterThanOrEqual [<<Phi>>,<<Const25>>]
/// CHECK-DAG: If [<<GE>>]
- /// CHECK-START: void Main.testGreaterCondition(java.lang.String[]) dead_code_elimination (after)
+ /// CHECK-START: void Main.testGreaterCondition(java.lang.String[]) dead_code_elimination$initial (after)
/// CHECK-DAG: If
/// CHECK-NOT: Phi
/// CHECK-NOT: GreaterThanOrEqual
@@ -160,7 +160,7 @@
// Test when comparing non constants.
- /// CHECK-START: void Main.testNonConstantEqual(java.lang.String[]) dead_code_elimination (before)
+ /// CHECK-START: void Main.testNonConstantEqual(java.lang.String[]) dead_code_elimination$initial (before)
/// CHECK-DAG: <<Const34:i\d+>> IntConstant 34
/// CHECK-DAG: <<Const42:i\d+>> IntConstant 42
/// CHECK-DAG: If
@@ -169,7 +169,7 @@
/// CHECK-DAG: <<NotEqual:z\d+>> NotEqual [<<Phi>>,<<StaticFieldGet>>]
/// CHECK-DAG: If [<<NotEqual>>]
- /// CHECK-START: void Main.testNonConstantEqual(java.lang.String[]) dead_code_elimination (after)
+ /// CHECK-START: void Main.testNonConstantEqual(java.lang.String[]) dead_code_elimination$initial (after)
/// CHECK-DAG: <<Const34:i\d+>> IntConstant 34
/// CHECK-DAG: If
/// CHECK-DAG: <<StaticFieldGet:i\d+>> StaticFieldGet
@@ -217,12 +217,12 @@
return true;
}
- /// CHECK-START: void Main.testSwitch(java.lang.String[]) dead_code_elimination (before)
+ /// CHECK-START: void Main.testSwitch(java.lang.String[]) dead_code_elimination$initial (before)
/// CHECK: If
/// CHECK: If
/// CHECK: If
- /// CHECK-START: void Main.testSwitch(java.lang.String[]) dead_code_elimination (after)
+ /// CHECK-START: void Main.testSwitch(java.lang.String[]) dead_code_elimination$initial (after)
/// CHECK: If
/// CHECK: If
/// CHECK-NOT: If
@@ -248,11 +248,11 @@
// Redirect default here.
}
- /// CHECK-START: void Main.testFP(java.lang.String[]) dead_code_elimination (before)
+ /// CHECK-START: void Main.testFP(java.lang.String[]) dead_code_elimination$initial (before)
/// CHECK: If
/// CHECK: If
- /// CHECK-START: void Main.testFP(java.lang.String[]) dead_code_elimination (after)
+ /// CHECK-START: void Main.testFP(java.lang.String[]) dead_code_elimination$initial (after)
/// CHECK: If
/// CHECK: If
public static void testFP(String[] args) {
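These renames reflect a change in how the optimizing compiler names repeated pass runs: instead of ad-hoc names like `dead_code_elimination_final`, an instance suffix is now attached with `$` (`dead_code_elimination$initial`, `dead_code_elimination$final`, `instruction_simplifier$after_bce`), so checker assertions can pin to one specific run of a pass. A hypothetical checker method illustrating the convention (the method and its assertions are illustrative only; a used constant survives both DCE runs):

```java
  /// CHECK-START: int Main.fortyTwo() dead_code_elimination$initial (after)
  /// CHECK-DAG: IntConstant 42

  /// CHECK-START: int Main.fortyTwo() dead_code_elimination$final (after)
  /// CHECK-DAG: IntConstant 42
  public static int fortyTwo() {
    return 42;
  }
```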
diff --git a/test/612-jit-dex-cache/src-ex/LoadedByAppClassLoader.java b/test/612-jit-dex-cache/src-ex/LoadedByAppClassLoader.java
index 1d6158a..fcb314d 100644
--- a/test/612-jit-dex-cache/src-ex/LoadedByAppClassLoader.java
+++ b/test/612-jit-dex-cache/src-ex/LoadedByAppClassLoader.java
@@ -29,7 +29,7 @@
}
class OtherClass {
- public static Class getB() {
+ public static Class<?> getB() {
// This used to return the B class of another class loader.
return B.class;
}
diff --git a/test/612-jit-dex-cache/src/Main.java b/test/612-jit-dex-cache/src/Main.java
index 0e4bd22..89ebe09 100644
--- a/test/612-jit-dex-cache/src/Main.java
+++ b/test/612-jit-dex-cache/src/Main.java
@@ -41,7 +41,7 @@
public class Main {
- private static Class classFromDifferentLoader() throws Exception {
+ private static Class<?> classFromDifferentLoader() throws Exception {
final String DEX_FILE = System.getenv("DEX_LOCATION") + "/612-jit-dex-cache-ex.jar";
ClassLoader loader = new DelegateLastPathClassLoader(DEX_FILE, Main.class.getClassLoader());
return loader.loadClass("LoadedByAppClassLoader");
@@ -49,7 +49,7 @@
public static void main(String[] args) throws Exception {
System.loadLibrary(args[0]);
- Class cls = classFromDifferentLoader();
+ Class<?> cls = classFromDifferentLoader();
Method m = cls.getDeclaredMethod("letMeInlineYou", A.class);
B b = new B();
// Invoke the method enough times to get an inline cache and get JITted.
@@ -63,5 +63,5 @@
}
}
- public static native void ensureJitCompiled(Class cls, String method_name);
+ public static native void ensureJitCompiled(Class<?> cls, String method_name);
}
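The test depends on `DelegateLastPathClassLoader`, a loader that consults its own dex files before falling back to its parent, so the `B` returned by `OtherClass.getB()` can differ from the `B` visible to `Main`. The test's loader is dex-based; a plain-Java sketch of the delegate-last idea (this helper class is an assumption for illustration, not ART's implementation):

```java
import java.net.URL;
import java.net.URLClassLoader;

// Looks in its own URLs first and only then delegates to the parent,
// inverting the default parent-first lookup.
class DelegateLastLoader extends URLClassLoader {
  DelegateLastLoader(URL[] urls, ClassLoader parent) {
    super(urls, parent);
  }

  @Override
  protected Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundException {
    Class<?> c = findLoadedClass(name);
    if (c == null) {
      try {
        c = findClass(name);              // try our own classpath first
      } catch (ClassNotFoundException e) {
        c = super.loadClass(name, false); // then the usual parent delegation
      }
    }
    if (resolve) {
      resolveClass(c);
    }
    return c;
  }
}
```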
diff --git a/test/614-checker-dump-constant-location/expected.txt b/test/614-checker-dump-constant-location/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/614-checker-dump-constant-location/expected.txt
diff --git a/test/614-checker-dump-constant-location/info.txt b/test/614-checker-dump-constant-location/info.txt
new file mode 100644
index 0000000..4a94ffa
--- /dev/null
+++ b/test/614-checker-dump-constant-location/info.txt
@@ -0,0 +1,2 @@
+Test that the graph visualizer outputs useful information for constant
+locations in parallel moves.
diff --git a/test/614-checker-dump-constant-location/src/Main.java b/test/614-checker-dump-constant-location/src/Main.java
new file mode 100644
index 0000000..f6bc063
--- /dev/null
+++ b/test/614-checker-dump-constant-location/src/Main.java
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+ public static int array_int[] = { 0 };
+ public static long array_long[] = { 0 };
+ public static float array_float[] = { 0.0f };
+ public static double array_double[] = { 0.0 };
+
+ // The code used to print constant locations in parallel moves is architecture
+ // independent. We only test on ARM and ARM64, where checking is easy: 'store'
+ // instructions only take registers as a source.
+
+ /// CHECK-START-ARM: void Main.store_to_arrays() register (after)
+ /// CHECK: ParallelMove {{.*#1->.*#2->.*#3\.3->.*#4\.4->.*}}
+
+ /// CHECK-START-ARM64: void Main.store_to_arrays() register (after)
+ /// CHECK: ParallelMove {{.*#1->.*#2->.*#3\.3->.*#4\.4->.*}}
+
+ public void store_to_arrays() {
+ array_int[0] = 1;
+ array_long[0] = 2;
+ array_float[0] = 3.3f;
+ array_double[0] = 4.4;
+ }
+
+ public static void main(String args[]) {}
+}
diff --git a/test/615-checker-arm64-zr-parallel-move/expected.txt b/test/615-checker-arm64-zr-parallel-move/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/615-checker-arm64-zr-parallel-move/expected.txt
diff --git a/test/615-checker-arm64-zr-parallel-move/info.txt b/test/615-checker-arm64-zr-parallel-move/info.txt
new file mode 100644
index 0000000..199755d
--- /dev/null
+++ b/test/615-checker-arm64-zr-parallel-move/info.txt
@@ -0,0 +1 @@
+Checker test to verify we correctly use wzr and xzr to synthesize zero constants.
diff --git a/test/615-checker-arm64-zr-parallel-move/src/Main.java b/test/615-checker-arm64-zr-parallel-move/src/Main.java
new file mode 100644
index 0000000..5024f28
--- /dev/null
+++ b/test/615-checker-arm64-zr-parallel-move/src/Main.java
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+ public static boolean doThrow = false;
+
+ public void $noinline$foo(int in_w1,
+ int in_w2,
+ int in_w3,
+ int in_w4,
+ int in_w5,
+ int in_w6,
+ int in_w7,
+ int on_stack_int,
+ long on_stack_long,
+ float in_s0,
+ float in_s1,
+ float in_s2,
+ float in_s3,
+ float in_s4,
+ float in_s5,
+ float in_s6,
+ float in_s7,
+ float on_stack_float,
+ double on_stack_double) {
+ if (doThrow) throw new Error();
+ }
+
+ // We expect a parallel move that moves the zero constant four times to stack locations.
+ /// CHECK-START-ARM64: void Main.bar() register (after)
+ /// CHECK: ParallelMove {{.*#0->[0-9x]+\(sp\).*#0->[0-9x]+\(sp\).*#0->[0-9x]+\(sp\).*#0->[0-9x]+\(sp\).*}}
+
+ // Those four moves should generate four 'store' instructions that use the zero register directly.
+ /// CHECK-START-ARM64: void Main.bar() disassembly (after)
+ /// CHECK-DAG: {{(str|stur)}} wzr, [sp, #{{[0-9]+}}]
+ /// CHECK-DAG: {{(str|stur)}} xzr, [sp, #{{[0-9]+}}]
+ /// CHECK-DAG: {{(str|stur)}} wzr, [sp, #{{[0-9]+}}]
+ /// CHECK-DAG: {{(str|stur)}} xzr, [sp, #{{[0-9]+}}]
+
+ public void bar() {
+ $noinline$foo(1, 2, 3, 4, 5, 6, 7, // Integral values in registers.
+ 0, 0L, // Integral values on the stack.
+ 1, 2, 3, 4, 5, 6, 7, 8, // Floating-point values in registers.
+ 0.0f, 0.0); // Floating-point values on the stack.
+ }
+
+ public static void main(String args[]) {}
+}
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 8f8b667..bba6f8e 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -26,7 +26,8 @@
# The path where build only targets will be output, e.g.
# out/target/product/generic_x86_64/obj/PACKAGING/art-run-tests_intermediates/DATA
-art_run_tests_dir := $(call intermediates-dir-for,PACKAGING,art-run-tests)/DATA
+art_run_tests_build_dir := $(call intermediates-dir-for,JAVA_LIBRARIES,art-run-tests)/DATA
+art_run_tests_install_dir := $(call intermediates-dir-for,PACKAGING,art-run-tests)/DATA
# A generated list of prerequisites that call 'run-test --build-only', the actual prerequisite is
# an empty file touched in the intermediate directory.
@@ -49,7 +50,8 @@
# Helper to create individual build targets for tests. Must be called with $(eval).
# $(1): the test number
define define-build-art-run-test
- dmart_target := $(art_run_tests_dir)/art-run-tests/$(1)/touch
+ dmart_target := $(art_run_tests_build_dir)/art-run-tests/$(1)/touch
+ dmart_install_target := $(art_run_tests_install_dir)/art-run-tests/$(1)/touch
run_test_options = --build-only
ifeq ($(ART_TEST_QUIET),true)
run_test_options += --quiet
@@ -67,8 +69,13 @@
$(LOCAL_PATH)/run-test $$(PRIVATE_RUN_TEST_OPTIONS) --output-path $$(abspath $$(dir $$@)) $(1)
$(hide) touch $$@
- TEST_ART_RUN_TEST_BUILD_RULES += $$(dmart_target)
+$$(dmart_install_target): $$(dmart_target)
+ $(hide) rm -rf $$(dir $$@) && mkdir -p $$(dir $$@)
+ $(hide) cp $$(dir $$<)/* $$(dir $$@)/
+
+ TEST_ART_RUN_TEST_BUILD_RULES += $$(dmart_install_target)
dmart_target :=
+ dmart_install_target :=
run_test_options :=
endef
$(foreach test, $(TEST_ART_RUN_TESTS), $(eval $(call define-build-art-run-test,$(test))))
@@ -78,12 +85,13 @@
LOCAL_MODULE := art-run-tests
LOCAL_ADDITIONAL_DEPENDENCIES := $(TEST_ART_RUN_TEST_BUILD_RULES)
# The build system uses this flag to pick up files generated by declare-make-art-run-test.
-LOCAL_PICKUP_FILES := $(art_run_tests_dir)
+LOCAL_PICKUP_FILES := $(art_run_tests_install_dir)
include $(BUILD_PHONY_PACKAGE)
# Clear temp vars.
-art_run_tests_dir :=
+art_run_tests_build_dir :=
+art_run_tests_install_dir :=
define-build-art-run-test :=
TEST_ART_RUN_TEST_BUILD_RULES :=
@@ -111,8 +119,14 @@
ifeq ($(ART_TEST_JIT),true)
COMPILER_TYPES += jit
endif
+OPTIMIZING_COMPILER_TYPES :=
ifeq ($(ART_TEST_OPTIMIZING),true)
COMPILER_TYPES += optimizing
+ OPTIMIZING_COMPILER_TYPES += optimizing
+endif
+ifeq ($(ART_TEST_OPTIMIZING_GRAPH_COLOR),true)
+ COMPILER_TYPES += regalloc_gc
+ OPTIMIZING_COMPILER_TYPES += regalloc_gc
endif
RELOCATE_TYPES := relocate
ifeq ($(ART_TEST_RUN_TEST_NO_RELOCATE),true)
@@ -468,14 +482,28 @@
TEST_ART_BROKEN_JIT_RUN_TESTS :=
+# Known broken tests for the graph coloring register allocator.
+# These tests were based on the linear scan allocator, which makes different decisions than
+# the graph coloring allocator. (These attempt to test for code quality, not correctness.)
+TEST_ART_BROKEN_OPTIMIZING_GRAPH_COLOR := \
+ 570-checker-select \
+ 484-checker-register-hints
+
+ifneq (,$(filter regalloc_gc,$(COMPILER_TYPES)))
+ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
+ regalloc_gc,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+ $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), \
+ $(TEST_ART_BROKEN_OPTIMIZING_GRAPH_COLOR),$(ALL_ADDRESS_SIZES))
+endif
+
# Known broken tests for the mips32 optimizing compiler backend.
TEST_ART_BROKEN_OPTIMIZING_MIPS_RUN_TESTS := \
510-checker-try-catch \
ifeq (mips,$(TARGET_ARCH))
- ifneq (,$(filter optimizing,$(COMPILER_TYPES)))
+ ifneq (,$(filter $(OPTIMIZING_COMPILER_TYPES),$(COMPILER_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES), \
- optimizing,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+ $(OPTIMIZING_COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
$(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), \
$(TEST_ART_BROKEN_OPTIMIZING_MIPS_RUN_TESTS),$(ALL_ADDRESS_SIZES))
endif
@@ -487,9 +515,9 @@
TEST_ART_BROKEN_OPTIMIZING_MIPS64_RUN_TESTS := \
ifeq (mips64,$(TARGET_ARCH))
- ifneq (,$(filter optimizing,$(COMPILER_TYPES)))
+ ifneq (,$(filter $(OPTIMIZING_COMPILER_TYPES),$(COMPILER_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES), \
- optimizing,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+ $(OPTIMIZING_COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
$(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), \
$(TEST_ART_BROKEN_OPTIMIZING_MIPS64_RUN_TESTS),$(ALL_ADDRESS_SIZES))
endif
@@ -502,9 +530,9 @@
454-get-vreg \
457-regs \
-ifneq (,$(filter optimizing,$(COMPILER_TYPES)))
+ifneq (,$(filter $(OPTIMIZING_COMPILER_TYPES),$(COMPILER_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
- optimizing,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+ $(OPTIMIZING_COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
$(IMAGE_TYPES),$(PICTEST_TYPES),ndebuggable,$(TEST_ART_BROKEN_OPTIMIZING_NONDEBUGGABLE_RUN_TESTS),$(ALL_ADDRESS_SIZES))
endif
@@ -513,9 +541,9 @@
# Tests that should fail when the optimizing compiler compiles them debuggable.
TEST_ART_BROKEN_OPTIMIZING_DEBUGGABLE_RUN_TESTS := \
-ifneq (,$(filter optimizing,$(COMPILER_TYPES)))
+ifneq (,$(filter $(OPTIMIZING_COMPILER_TYPES),$(COMPILER_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
- optimizing,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+ $(OPTIMIZING_COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
$(IMAGE_TYPES),$(PICTEST_TYPES),debuggable,$(TEST_ART_BROKEN_OPTIMIZING_DEBUGGABLE_RUN_TESTS),$(ALL_ADDRESS_SIZES))
endif
@@ -527,13 +555,10 @@
# Tests that should fail in the read barrier configuration with the Optimizing compiler (AOT).
# 484: Baker's fast path based read barrier compiler instrumentation generates code containing
# more parallel moves on x86, thus some Checker assertions may fail.
-# 527: On ARM64 and ARM, the read barrier instrumentation does not support the HIntermediateAddress
-# instruction yet (b/26601270).
# 537: Expects an array copy to be intrinsified on x86-64, but calling-on-slowpath intrinsics are
# not yet handled in the read barrier configuration.
TEST_ART_BROKEN_OPTIMIZING_READ_BARRIER_RUN_TESTS := \
484-checker-register-hints \
- 527-checker-array-access-split \
537-checker-arraycopy
# Tests that should fail in the read barrier configuration with JIT (Optimizing compiler).
@@ -547,9 +572,9 @@
$(TEST_ART_BROKEN_INTERPRETER_READ_BARRIER_RUN_TESTS),$(ALL_ADDRESS_SIZES))
endif
- ifneq (,$(filter optimizing,$(COMPILER_TYPES)))
+ ifneq (,$(filter $(OPTIMIZING_COMPILER_TYPES),$(COMPILER_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES), \
- $(PREBUILD_TYPES),optimizing,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES), \
+ $(PREBUILD_TYPES),$(OPTIMIZING_COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES), \
$(JNI_TYPES),$(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), \
$(TEST_ART_BROKEN_OPTIMIZING_READ_BARRIER_RUN_TESTS),$(ALL_ADDRESS_SIZES))
endif
@@ -578,9 +603,9 @@
055-enum-performance
ifeq ($(ART_HEAP_POISONING),true)
- ifneq (,$(filter optimizing,$(COMPILER_TYPES)))
+ ifneq (,$(filter $(OPTIMIZING_COMPILER_TYPES),$(COMPILER_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES), \
- $(PREBUILD_TYPES),optimizing,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+ $(PREBUILD_TYPES),$(OPTIMIZING_COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
$(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), \
$(TEST_ART_BROKEN_OPTIMIZING_HEAP_POISONING_RUN_TESTS),$(ALL_ADDRESS_SIZES))
endif
@@ -727,6 +752,9 @@
ifeq ($(4),optimizing)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_OPTIMIZING_RULES
run_test_options += --optimizing
+ else ifeq ($(4),regalloc_gc)
+ test_groups += ART_RUN_TEST_$$(uc_host_or_target)_OPTIMIZING_GRAPH_COLOR_RULES
+ run_test_options += --optimizing -Xcompiler-option --register-allocation-strategy=graph-color
else
ifeq ($(4),interpreter)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_INTERPRETER_RULES
@@ -810,6 +838,10 @@
endif
endif
image_suffix := $(4)
+ ifeq ($(4),regalloc_gc)
+ # Graph coloring tests share the image_suffix with optimizing tests.
+ image_suffix := optimizing
+ endif
ifeq ($(9),no-image)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_NO_IMAGE_RULES
run_test_options += --no-image
diff --git a/test/MyClassNatives/MyClassNatives.java b/test/MyClassNatives/MyClassNatives.java
index 19c13f7..4a8b0e0 100644
--- a/test/MyClassNatives/MyClassNatives.java
+++ b/test/MyClassNatives/MyClassNatives.java
@@ -35,11 +35,11 @@
static native int getText(long val1, Object obj1, long val2, Object obj2);
synchronized native Object []getSinkPropertiesNative(String path);
- native Class instanceMethodThatShouldReturnClass();
- static native Class staticMethodThatShouldReturnClass();
+ native Class<?> instanceMethodThatShouldReturnClass();
+ static native Class<?> staticMethodThatShouldReturnClass();
- native void instanceMethodThatShouldTakeClass(int i, Class c);
- static native void staticMethodThatShouldTakeClass(int i, Class c);
+ native void instanceMethodThatShouldTakeClass(int i, Class<?> c);
+ static native void staticMethodThatShouldTakeClass(int i, Class<?> c);
native float checkFloats(float f1, float f2);
native void forceStackParameters(int i1, int i2, int i3, int i4, int i5, int i6, int i8, int i9,
diff --git a/test/common/runtime_state.cc b/test/common/runtime_state.cc
index 806e130..ee2ee1a 100644
--- a/test/common/runtime_state.cc
+++ b/test/common/runtime_state.cc
@@ -130,18 +130,18 @@
return;
}
- ScopedObjectAccess soa(Thread::Current());
+ ArtMethod* method = nullptr;
+ {
+ ScopedObjectAccess soa(Thread::Current());
- ScopedUtfChars chars(env, method_name);
- CHECK(chars.c_str() != nullptr);
-
- mirror::Class* klass = soa.Decode<mirror::Class*>(cls);
- ArtMethod* method = klass->FindDeclaredDirectMethodByName(chars.c_str(), kRuntimePointerSize);
+ ScopedUtfChars chars(env, method_name);
+ CHECK(chars.c_str() != nullptr);
+ method = soa.Decode<mirror::Class*>(cls)->FindDeclaredDirectMethodByName(
+ chars.c_str(), kRuntimePointerSize);
+ }
jit::JitCodeCache* code_cache = jit->GetCodeCache();
OatQuickMethodHeader* header = nullptr;
- // Make sure there is a profiling info, required by the compiler.
- ProfilingInfo::Create(soa.Self(), method, /* retry_allocation */ true);
while (true) {
header = OatQuickMethodHeader::FromEntryPoint(method->GetEntryPointFromQuickCompiledCode());
if (code_cache->ContainsPc(header->GetCode())) {
@@ -149,6 +149,9 @@
} else {
// Sleep to yield to the compiler thread.
usleep(1000);
+ ScopedObjectAccess soa(Thread::Current());
+ // Make sure there is a ProfilingInfo object, required by the compiler.
+ ProfilingInfo::Create(soa.Self(), method, /* retry_allocation */ true);
// Will either ensure it's compiled or do the compilation itself.
jit->CompileMethod(method, soa.Self(), /* osr */ false);
}
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 64bf4f3..c6c9380 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -553,12 +553,10 @@
if [ "$TIME_OUT" = "timeout" ]; then
# Add timeout command if time out is desired.
#
- # Note: We use nested timeouts. The inner timeout sends SIGRTMIN+2 (usually 36) to ART, which
- # will induce a full thread dump before abort. However, dumping threads might deadlock,
- # so the outer timeout sends the regular SIGTERM after an additional minute to ensure
- # termination (without dumping all threads).
- TIME_PLUS_ONE=$(($TIME_OUT_VALUE + 60))
- cmdline="timeout ${TIME_PLUS_ONE}s timeout -s SIGRTMIN+2 ${TIME_OUT_VALUE}s $cmdline"
+ # Note: We first send SIGRTMIN+2 (usually 36) to ART, which will induce a full thread dump
+ # before aborting. However, dumping threads might deadlock, so we also pass the "-k"
+ # option to guarantee that the child is eventually killed.
+ cmdline="timeout -k 120s -s SIGRTMIN+2 ${TIME_OUT_VALUE}s $cmdline"
fi
if [ "$DEV_MODE" = "y" ]; then
diff --git a/tools/ahat/Android.mk b/tools/ahat/Android.mk
index 834426d..60e0cd8 100644
--- a/tools/ahat/Android.mk
+++ b/tools/ahat/Android.mk
@@ -30,6 +30,10 @@
LOCAL_IS_HOST_MODULE := true
LOCAL_MODULE_TAGS := optional
LOCAL_MODULE := ahat
+
+# Let users with Java 7 run ahat (b/28303627)
+LOCAL_JAVA_LANGUAGE_VERSION := 1.7
+
include $(BUILD_HOST_JAVA_LIBRARY)
# --- ahat script ----------------
diff --git a/tools/ahat/src/AhatSnapshot.java b/tools/ahat/src/AhatSnapshot.java
index e6f8411..a8205c7 100644
--- a/tools/ahat/src/AhatSnapshot.java
+++ b/tools/ahat/src/AhatSnapshot.java
@@ -86,7 +86,7 @@
mSnapshot = snapshot;
mHeaps = new ArrayList<Heap>(mSnapshot.getHeaps());
- ClassObj javaLangClass = mSnapshot.findClass("java.lang.Class");
+ final ClassObj javaLangClass = mSnapshot.findClass("java.lang.Class");
for (Heap heap : mHeaps) {
// Use a single element array for the total to act as a reference to a
// long.
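Making `javaLangClass` final is consistent with the `LOCAL_JAVA_LANGUAGE_VERSION := 1.7` change just above: under `-source 1.7`, a local variable captured by an anonymous inner class must be declared final, whereas Java 8 also accepts effectively-final locals. A self-contained sketch of the rule (names are illustrative):

```java
import java.util.concurrent.Callable;

public class CaptureDemo {
  public static void main(String[] args) throws Exception {
    // 'final' is mandatory here under -source 1.7; Java 8+ would also
    // accept the variable without the keyword ("effectively final").
    final String greeting = "hello";
    Callable<String> task = new Callable<String>() {
      @Override
      public String call() {
        return greeting; // capturing a local requires final in Java 7
      }
    };
    System.out.println(task.call());
  }
}
```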
diff --git a/tools/ahat/src/InstanceUtils.java b/tools/ahat/src/InstanceUtils.java
index 3cdb40c..8769d11 100644
--- a/tools/ahat/src/InstanceUtils.java
+++ b/tools/ahat/src/InstanceUtils.java
@@ -95,9 +95,7 @@
return null;
}
- // TODO: When perflib provides a better way to get the length of the
- // array, we should use that here.
- int numChars = chars.getValues().length;
+ int numChars = chars.getLength();
int count = getIntField(inst, "count", numChars);
if (count == 0) {
return "";
diff --git a/tools/buildbot-build.sh b/tools/buildbot-build.sh
index d88a4a0..12e0338 100755
--- a/tools/buildbot-build.sh
+++ b/tools/buildbot-build.sh
@@ -21,7 +21,7 @@
out_dir=${OUT_DIR-out}
java_libraries_dir=${out_dir}/target/common/obj/JAVA_LIBRARIES
-common_targets="vogar core-tests apache-harmony-jdwp-tests-hostdex jsr166-tests ${out_dir}/host/linux-x86/bin/jack"
+common_targets="vogar core-tests apache-harmony-jdwp-tests-hostdex jsr166-tests mockito-target ${out_dir}/host/linux-x86/bin/jack"
mode="target"
j_arg="-j$(nproc)"
showcommands=
diff --git a/tools/cpp-define-generator/constant_heap.def b/tools/cpp-define-generator/constant_heap.def
new file mode 100644
index 0000000..dc76736
--- /dev/null
+++ b/tools/cpp-define-generator/constant_heap.def
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Export heap values.
+
+#if defined(DEFINE_INCLUDE_DEPENDENCIES)
+#include "gc/heap.h"
+#endif
+
+// Minimum size at which an object may be allocated in the large object space.
+DEFINE_EXPR(MIN_LARGE_OBJECT_THRESHOLD, size_t, art::gc::Heap::kMinLargeObjectThreshold)
+
diff --git a/tools/cpp-define-generator/constant_lockword.def b/tools/cpp-define-generator/constant_lockword.def
index c1e6099..67ed5b5 100644
--- a/tools/cpp-define-generator/constant_lockword.def
+++ b/tools/cpp-define-generator/constant_lockword.def
@@ -30,5 +30,12 @@
DEFINE_LOCK_WORD_EXPR(READ_BARRIER_STATE_MASK_TOGGLED, uint32_t, kReadBarrierStateMaskShiftedToggled)
DEFINE_LOCK_WORD_EXPR(THIN_LOCK_COUNT_ONE, int32_t, kThinLockCountOne)
+DEFINE_LOCK_WORD_EXPR(GC_STATE_MASK_SHIFTED, uint32_t, kGCStateMaskShifted)
+DEFINE_LOCK_WORD_EXPR(GC_STATE_MASK_SHIFTED_TOGGLED, uint32_t, kGCStateMaskShiftedToggled)
+DEFINE_LOCK_WORD_EXPR(GC_STATE_SHIFT, int32_t, kGCStateShift)
+
+DEFINE_LOCK_WORD_EXPR(MARK_BIT_SHIFT, int32_t, kMarkBitStateShift)
+DEFINE_LOCK_WORD_EXPR(MARK_BIT_MASK_SHIFTED, uint32_t, kMarkBitStateMaskShifted)
+
#undef DEFINE_LOCK_WORD_EXPR
diff --git a/tools/cpp-define-generator/offset_runtime.def b/tools/cpp-define-generator/offset_runtime.def
index b327ca3..123992f 100644
--- a/tools/cpp-define-generator/offset_runtime.def
+++ b/tools/cpp-define-generator/offset_runtime.def
@@ -34,6 +34,8 @@
DEFINE_RUNTIME_CALLEE_SAVE_OFFSET(REFS_ONLY, kRefsOnly)
// Offset of field Runtime::callee_save_methods_[kRefsAndArgs]
DEFINE_RUNTIME_CALLEE_SAVE_OFFSET(REFS_AND_ARGS, kRefsAndArgs)
+// Offset of field Runtime::callee_save_methods_[kSaveEverything]
+DEFINE_RUNTIME_CALLEE_SAVE_OFFSET(SAVE_EVERYTHING, kSaveEverything)
#undef DEFINE_RUNTIME_CALLEE_SAVE_OFFSET
#include "common_undef.def" // undef DEFINE_OFFSET_EXPR
diff --git a/tools/cpp-define-generator/offsets_all.def b/tools/cpp-define-generator/offsets_all.def
index 01e4d5b..d2d8777 100644
--- a/tools/cpp-define-generator/offsets_all.def
+++ b/tools/cpp-define-generator/offsets_all.def
@@ -48,6 +48,7 @@
// TODO: MIRROR_*_ARRAY offsets (depends on header size)
// TODO: MIRROR_STRING offsets (depends on header size)
#include "offset_dexcache.def"
+#include "constant_heap.def"
#include "constant_lockword.def"
#include "constant_globals.def"
#include "constant_rosalloc.def"
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index bf8d12b..e5d7597 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -36,6 +36,15 @@
names: ["libcore.io.OsTest#testUnixDomainSockets_in_file_system"]
},
{
+ description: "TCP_USER_TIMEOUT is not defined on host's tcp.h (glibc-2.15-4.8).",
+ result: EXEC_FAILED,
+ modes: [host],
+ names: ["libcore.android.system.OsConstantsTest#testTcpUserTimeoutIsDefined",
+ "libcore.io.OsTest#test_socket_tcpUserTimeout_setAndGet",
+ "libcore.io.OsTest#test_socket_tcpUserTimeout_doesNotWorkOnDatagramSocket"],
+ bug: 30402085
+},
+{
description: "Issue with incorrect device time (1970)",
result: EXEC_FAILED,
modes: [device],
@@ -171,43 +180,6 @@
bug: 25437292
},
{
- description: "Failing tests after OpenJDK move.",
- result: EXEC_FAILED,
- bug: 26326992,
- names: ["libcore.icu.RelativeDateTimeFormatterTest#test_getRelativeDateTimeStringDST",
- "libcore.java.lang.OldSystemTest#test_load",
- "libcore.java.text.NumberFormatTest#test_currencyWithPatternDigits",
- "libcore.java.text.NumberFormatTest#test_setCurrency",
- "libcore.java.text.OldNumberFormatTest#test_getIntegerInstanceLjava_util_Locale",
- "libcore.java.util.CalendarTest#testAddOneDayAndOneDayOver30MinuteDstForwardAdds48Hours",
- "libcore.java.util.CalendarTest#testNewCalendarKoreaIsSelfConsistent",
- "libcore.java.util.CalendarTest#testSetTimeInZoneWhereDstIsNoLongerUsed",
- "libcore.java.util.CalendarTest#test_nullLocale",
- "libcore.java.util.FormatterTest#test_numberLocalization",
- "libcore.java.util.FormatterTest#test_uppercaseConversions",
- "libcore.javax.crypto.CipherTest#testCipher_getInstance_WrongType_Failure",
- "libcore.javax.crypto.CipherTest#testDecryptBufferZeroSize_mustDecodeToEmptyString",
- "libcore.javax.security.auth.x500.X500PrincipalTest#testExceptionsForWrongDNs",
- "org.apache.harmony.luni.tests.java.net.URLConnectionTest#test_getDate",
- "org.apache.harmony.luni.tests.java.net.URLConnectionTest#test_getExpiration",
- "org.apache.harmony.regex.tests.java.util.regex.PatternSyntaxExceptionTest#testPatternSyntaxException",
- "org.apache.harmony.tests.java.lang.FloatTest#test_parseFloat_LString_Harmony6261",
- "org.apache.harmony.tests.java.lang.ThreadTest#test_isDaemon",
- "org.apache.harmony.tests.java.text.DecimalFormatSymbolsTest#test_setInternationalCurrencySymbolLjava_lang_String",
- "org.apache.harmony.tests.java.text.DecimalFormatTest#testSerializationHarmonyRICompatible",
- "org.apache.harmony.tests.java.text.SimpleDateFormatTest#test_parseLjava_lang_StringLjava_text_ParsePosition",
- "org.apache.harmony.tests.java.text.SimpleDateFormatTest#test_parse_W_w_dd_MMMM_yyyy_EEEE",
- "org.apache.harmony.tests.java.text.SimpleDateFormatTest#test_parse_dayOfYearPatterns",
- "org.apache.harmony.tests.java.text.SimpleDateFormatTest#test_parse_h_m_z",
- "org.apache.harmony.tests.java.text.SimpleDateFormatTest#test_parse_h_z_2DigitOffsetFromGMT",
- "org.apache.harmony.tests.java.text.SimpleDateFormatTest#test_parse_h_z_4DigitOffsetFromGMT",
- "org.apache.harmony.tests.java.text.SimpleDateFormatTest#test_parse_h_z_4DigitOffsetNoGMT",
- "org.apache.harmony.tests.java.util.jar.JarFileTest#test_getInputStreamLjava_util_jar_JarEntry_subtest0",
- "libcore.java.util.CalendarTest#test_clear_45877",
- "org.apache.harmony.crypto.tests.javax.crypto.spec.SecretKeySpecTest#testGetFormat",
- "org.apache.harmony.tests.java.util.TimerTaskTest#test_scheduledExecutionTime"]
-},
-{
description: "Missing resource in classpath",
result: EXEC_FAILED,
modes: [device],
@@ -262,10 +234,12 @@
names: ["org.apache.harmony.tests.java.lang.ProcessTest#test_destroyForcibly"]
},
{
- description: "Flaky failure, possibly caused by a kernel bug accessing /proc/",
+ description: "Flaky failure, native crash in the runtime.
+ Unclear if this relates to the tests running sh as a child process.",
result: EXEC_FAILED,
- bug: 27464570,
+ bug: 30657148,
modes: [device],
- names: ["libcore.java.lang.ProcessBuilderTest#testRedirectInherit"]
+ names: ["libcore.java.lang.ProcessBuilderTest#testRedirectInherit",
+ "libcore.java.lang.ProcessBuilderTest#testRedirect_nullStreams"]
}
]
diff --git a/tools/run-libcore-tests.sh b/tools/run-libcore-tests.sh
index 3605aa0..2a6e172 100755
--- a/tools/run-libcore-tests.sh
+++ b/tools/run-libcore-tests.sh
@@ -25,17 +25,22 @@
JAVA_LIBRARIES=${ANDROID_PRODUCT_OUT}/../../common/obj/JAVA_LIBRARIES
fi
-# Jar containing jsr166 tests.
-jsr166_test_jack=${JAVA_LIBRARIES}/jsr166-tests_intermediates/classes.jack
+function cparg {
+ for var
+ do
+ printf -- "--classpath ${JAVA_LIBRARIES}/${var}_intermediates/classes.jack ";
+ done
+}
-# Jar containing all the other tests.
-test_jack=${JAVA_LIBRARIES}/core-tests_intermediates/classes.jack
+DEPS="core-tests jsr166-tests mockito-target"
-if [ ! -f $test_jack ]; then
- echo "Before running, you must build core-tests, jsr166-tests and vogar: \
- make core-tests jsr166-tests vogar"
- exit 1
-fi
+for lib in $DEPS
+do
+ if [ ! -f "${JAVA_LIBRARIES}/${lib}_intermediates/classes.jack" ]; then
+ echo "${lib} is missing. Before running, you must run art/tools/buildbot-build.sh"
+ exit 1
+ fi
+done
expectations="--expectations art/tools/libcore_failures.txt"
if [ "x$ART_USE_READ_BARRIER" = xtrue ]; then
@@ -133,4 +138,4 @@
# Run the tests using vogar.
echo "Running tests for the following test packages:"
echo ${working_packages[@]} | tr " " "\n"
-vogar $vogar_args $expectations --classpath $jsr166_test_jack --classpath $test_jack ${working_packages[@]}
+vogar $vogar_args $expectations $(cparg $DEPS) ${working_packages[@]}