Diffstat (limited to 'compiler')
 compiler/Android.mk                          |  15
 compiler/common_compiler_test.h              |  11
 compiler/compiled_method.cc                  |  15
 compiler/compiled_method.h                   |   9
 compiler/compiler_backend.cc                 |  39
 compiler/compiler_backend.h                  |  34
 compiler/dex/bb_optimizations.h              |  14
 compiler/dex/compiler_ir.h                   |   3
 compiler/dex/dex_to_dex_compiler.cc          |   8
 compiler/dex/frontend.cc                     |  20
 compiler/dex/frontend.h                      |   2
 compiler/dex/local_value_numbering.cc        |  73
 compiler/dex/local_value_numbering_test.cc   |  75
 compiler/dex/mir_analysis.cc                 | 114
 compiler/dex/mir_field_info.cc               | 124
 compiler/dex/mir_field_info.h                | 211
 compiler/dex/mir_graph.cc                    |  19
 compiler/dex/mir_graph.h                     |  40
 compiler/dex/mir_optimization.cc             |  67
 compiler/dex/pass.h                          |   4
 compiler/dex/pass_driver.cc                  |   1
 compiler/dex/portable/mir_to_gbc.cc          |   1
 compiler/dex/portable/mir_to_gbc.h           |   7
 compiler/dex/quick/arm/arm_lir.h             |  23
 compiler/dex/quick/arm/assemble_arm.cc       |   5
 compiler/dex/quick/arm/call_arm.cc           |  11
 compiler/dex/quick/arm/codegen_arm.h         |   2
 compiler/dex/quick/arm/fp_arm.cc             |  60
 compiler/dex/quick/arm/int_arm.cc            | 342
 compiler/dex/quick/arm/target_arm.cc         |  39
 compiler/dex/quick/arm/utility_arm.cc        |  15
 compiler/dex/quick/codegen_util.cc           |  60
 compiler/dex/quick/dex_file_method_inliner.h |  11
 compiler/dex/quick/gen_common.cc             | 314
 compiler/dex/quick/gen_invoke.cc             | 136
 compiler/dex/quick/gen_loadstore.cc          | 144
 compiler/dex/quick/mips/call_mips.cc         |  10
 compiler/dex/quick/mips/codegen_mips.h       |   2
 compiler/dex/quick/mips/fp_mips.cc           |  20
 compiler/dex/quick/mips/int_mips.cc          | 104
 compiler/dex/quick/mips/mips_lir.h           |  24
 compiler/dex/quick/mips/target_mips.cc       |  35
 compiler/dex/quick/mir_to_lir.cc             |  83
 compiler/dex/quick/mir_to_lir.h              |  18
 compiler/dex/quick/ralloc_util.cc            | 130
 compiler/dex/quick/x86/call_x86.cc           |  14
 compiler/dex/quick/x86/codegen_x86.h         |  21
 compiler/dex/quick/x86/fp_x86.cc             |  84
 compiler/dex/quick/x86/int_x86.cc            | 350
 compiler/dex/quick/x86/target_x86.cc         | 332
 compiler/dex/quick/x86/utility_x86.cc        |   4
 compiler/dex/quick/x86/x86_lir.h             |  23
 compiler/dex/reg_storage.h                   | 158
 compiler/dex/vreg_analysis.cc                |  14
 compiler/driver/compiler_callbacks_impl.h    |  10
 compiler/driver/compiler_driver-inl.h        | 165
 compiler/driver/compiler_driver.cc           | 303
 compiler/driver/compiler_driver.h            | 119
 compiler/driver/compiler_driver_test.cc      |   2
 compiler/driver/compiler_options.h           |  14
 compiler/elf_writer.cc                       |   7
 compiler/elf_writer.h                        |  17
 compiler/elf_writer_mclinker.cc              |  42
 compiler/elf_writer_mclinker.h               |  17
 compiler/elf_writer_quick.cc                 | 347
 compiler/elf_writer_quick.h                  |  28
 compiler/image_test.cc                       |  20
 compiler/image_writer.cc                     |  25
 compiler/image_writer.h                      |   5
 compiler/llvm/compiler_llvm.cc               |  10
 compiler/llvm/compiler_llvm.h                |   2
 compiler/llvm/gbc_expander.cc                |  33
 compiler/llvm/llvm_compilation_unit.cc       |   3
 compiler/oat_test.cc                         |   8
 compiler/oat_writer.cc                       | 195
 compiler/oat_writer.h                        |  41
 compiler/utils/arena_bit_vector.cc           |   4
 compiler/utils/arm/managed_register_arm.cc   |  10
 compiler/utils/assembler.h                   |   2
 compiler/utils/mips/managed_register_mips.cc |  11
 compiler/utils/x86/managed_register_x86.cc   |  13
 81 files changed, 3255 insertions(+), 1692 deletions(-)
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 7a78b3a5db..82f1896baf 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -50,6 +50,7 @@ LIBART_COMPILER_SRC_FILES := \
dex/quick/x86/utility_x86.cc \
dex/dex_to_dex_compiler.cc \
dex/mir_dataflow.cc \
+ dex/mir_field_info.cc \
dex/mir_optimization.cc \
dex/pass_driver.cc \
dex/bb_optimizations.cc \
@@ -109,7 +110,7 @@ endif
LIBART_COMPILER_CFLAGS :=
ifeq ($(ART_USE_PORTABLE_COMPILER),true)
-LIBART_COMPILER_SRC_FILES +=
+LIBART_COMPILER_SRC_FILES += \
dex/portable/mir_to_gbc.cc \
elf_writer_mclinker.cc \
jni/portable/jni_compiler.cc \
@@ -123,11 +124,12 @@ LIBART_COMPILER_SRC_FILES +=
llvm/runtime_support_builder.cc \
llvm/runtime_support_builder_arm.cc \
llvm/runtime_support_builder_x86.cc
- LIBART_COMPILER_CFLAGS += -DART_USE_PORTABLE_COMPILER=1
+LIBART_COMPILER_CFLAGS += -DART_USE_PORTABLE_COMPILER=1
endif
LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES := \
- dex/compiler_enums.h
+ dex/compiler_enums.h \
+ dex/quick/dex_file_method_inliner.h
# $(1): target or host
# $(2): ndebug or debug
@@ -214,12 +216,15 @@ $$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PAT
ifeq ($(TARGET_ARCH),arm64)
$$(info TODOAArch64: $$(LOCAL_PATH)/Android.mk Add Arm64 specific MCLinker libraries)
endif # TARGET_ARCH != arm64
+ include $(LLVM_DEVICE_BUILD_MK)
else # host
LOCAL_STATIC_LIBRARIES += libmcldARMInfo libmcldARMTarget
LOCAL_STATIC_LIBRARIES += libmcldX86Info libmcldX86Target
LOCAL_STATIC_LIBRARIES += libmcldMipsInfo libmcldMipsTarget
+ include $(LLVM_HOST_BUILD_MK)
endif
LOCAL_STATIC_LIBRARIES += libmcldCore libmcldObject libmcldADT libmcldFragment libmcldTarget libmcldCodeGen libmcldLDVariant libmcldMC libmcldSupport libmcldLD
+ include $(LLVM_GEN_INTRINSICS_MK)
endif
LOCAL_C_INCLUDES += $(ART_C_INCLUDES) art/runtime
@@ -231,13 +236,9 @@ $$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PAT
LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
ifeq ($$(art_target_or_host),target)
LOCAL_SHARED_LIBRARIES += libcutils
- include $(LLVM_GEN_INTRINSICS_MK)
- include $(LLVM_DEVICE_BUILD_MK)
include $(BUILD_SHARED_LIBRARY)
else # host
LOCAL_STATIC_LIBRARIES += libcutils
- include $(LLVM_GEN_INTRINSICS_MK)
- include $(LLVM_HOST_BUILD_MK)
include $(BUILD_HOST_SHARED_LIBRARY)
endif
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index 0999d09e71..3bdc95ea54 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -203,8 +203,11 @@ class CommonCompilerTest : public CommonRuntimeTest {
method->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge);
} else {
// No code? You must mean to go into the interpreter.
- const void* method_code = kUsePortableCompiler ? GetPortableToInterpreterBridge()
- : GetQuickToInterpreterBridge();
+ // Or the generic JNI...
+ const void* method_code = method->IsNative() ? GetQuickGenericJniTrampoline()
+ : (kUsePortableCompiler
+ ? GetPortableToInterpreterBridge()
+ : GetQuickToInterpreterBridge());
OatFile::OatMethod oat_method = CreateOatMethod(method_code,
kStackAlignment,
0,
@@ -240,7 +243,7 @@ class CommonCompilerTest : public CommonRuntimeTest {
#if GCC_VERSION >= 40303
__builtin___clear_cache(reinterpret_cast<void*>(base), reinterpret_cast<void*>(base + len));
#else
- LOG(FATAL) << "UNIMPLEMENTED: cache flush";
+ LOG(WARNING) << "UNIMPLEMENTED: cache flush";
#endif
}
@@ -353,7 +356,7 @@ class CommonCompilerTest : public CommonRuntimeTest {
CHECK(method != nullptr);
TimingLogger timings("CommonTest::CompileMethod", false, false);
timings.StartSplit("CompileOne");
- compiler_driver_->CompileOne(method, timings);
+ compiler_driver_->CompileOne(method, &timings);
MakeExecutable(method);
timings.EndSplit();
}
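
The nested ternary above selects one of three entry points. As a readability aid, here is the same selection logic spelled out as a standalone sketch; the Get* accessors are the real ART trampoline/bridge getters, but their declarations are assumed here rather than included:

    // Equivalent selection logic, spelled out (sketch; declarations assumed).
    extern const void* GetQuickGenericJniTrampoline();
    extern const void* GetPortableToInterpreterBridge();
    extern const void* GetQuickToInterpreterBridge();

    const void* SelectBridge(bool is_native, bool use_portable_compiler) {
      if (is_native) {
        // Native method without compiled code: route through the generic JNI trampoline.
        return GetQuickGenericJniTrampoline();
      }
      // Otherwise fall back to the interpreter bridge matching the compiler flavor.
      return use_portable_compiler ? GetPortableToInterpreterBridge()
                                   : GetQuickToInterpreterBridge();
    }
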
diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc
index f6d724ab56..d884bc0ef8 100644
--- a/compiler/compiled_method.cc
+++ b/compiler/compiled_method.cc
@@ -153,12 +153,14 @@ CompiledMethod::CompiledMethod(CompilerDriver& driver,
const uint32_t fp_spill_mask,
const std::vector<uint8_t>& mapping_table,
const std::vector<uint8_t>& vmap_table,
- const std::vector<uint8_t>& native_gc_map)
+ const std::vector<uint8_t>& native_gc_map,
+ const std::vector<uint8_t>* cfi_info)
: CompiledCode(&driver, instruction_set, quick_code), frame_size_in_bytes_(frame_size_in_bytes),
core_spill_mask_(core_spill_mask), fp_spill_mask_(fp_spill_mask),
mapping_table_(driver.DeduplicateMappingTable(mapping_table)),
vmap_table_(driver.DeduplicateVMapTable(vmap_table)),
- gc_map_(driver.DeduplicateGCMap(native_gc_map)) {
+ gc_map_(driver.DeduplicateGCMap(native_gc_map)),
+ cfi_info_(driver.DeduplicateCFIInfo(cfi_info)) {
}
CompiledMethod::CompiledMethod(CompilerDriver& driver,
@@ -169,10 +171,11 @@ CompiledMethod::CompiledMethod(CompilerDriver& driver,
const uint32_t fp_spill_mask)
: CompiledCode(&driver, instruction_set, code),
frame_size_in_bytes_(frame_size_in_bytes),
- core_spill_mask_(core_spill_mask), fp_spill_mask_(fp_spill_mask) {
- mapping_table_ = driver.DeduplicateMappingTable(std::vector<uint8_t>());
- vmap_table_ = driver.DeduplicateVMapTable(std::vector<uint8_t>());
- gc_map_ = driver.DeduplicateGCMap(std::vector<uint8_t>());
+ core_spill_mask_(core_spill_mask), fp_spill_mask_(fp_spill_mask),
+ mapping_table_(driver.DeduplicateMappingTable(std::vector<uint8_t>())),
+ vmap_table_(driver.DeduplicateVMapTable(std::vector<uint8_t>())),
+ gc_map_(driver.DeduplicateGCMap(std::vector<uint8_t>())),
+ cfi_info_(nullptr) {
}
// Constructs a CompiledMethod for the Portable compiler.
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index 611230509a..90ae6eeae8 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -110,7 +110,8 @@ class CompiledMethod : public CompiledCode {
const uint32_t fp_spill_mask,
const std::vector<uint8_t>& mapping_table,
const std::vector<uint8_t>& vmap_table,
- const std::vector<uint8_t>& native_gc_map);
+ const std::vector<uint8_t>& native_gc_map,
+ const std::vector<uint8_t>* cfi_info);
// Constructs a CompiledMethod for the QuickJniCompiler.
CompiledMethod(CompilerDriver& driver,
@@ -157,6 +158,10 @@ class CompiledMethod : public CompiledCode {
return *gc_map_;
}
+ const std::vector<uint8_t>* GetCFIInfo() const {
+ return cfi_info_;
+ }
+
private:
// For quick code, the size of the activation used by the code.
const size_t frame_size_in_bytes_;
@@ -172,6 +177,8 @@ class CompiledMethod : public CompiledCode {
// For quick code, a map keyed by native PC indices to bitmaps describing what dalvik registers
// are live. For portable code, the key is a dalvik PC.
std::vector<uint8_t>* gc_map_;
+ // For quick code, an FDE entry for the .debug_frame section.
+ std::vector<uint8_t>* cfi_info_;
};
} // namespace art
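
The new cfi_info_ member is routed through CompilerDriver::DeduplicateCFIInfo like the other per-method tables, so methods with identical CFI share one buffer. A minimal sketch of the deduplication idea, assuming a simple set-based cache (DedupCache is a hypothetical name, not the driver's API):

    #include <cstdint>
    #include <set>
    #include <vector>

    class DedupCache {
     public:
      // Returns a canonical copy with the same contents; equal inputs share storage.
      const std::vector<uint8_t>* Add(const std::vector<uint8_t>& data) {
        // insert() finds an existing equal element instead of storing a duplicate.
        return &*storage_.insert(data).first;
      }

     private:
      std::set<std::vector<uint8_t>> storage_;
    };
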
diff --git a/compiler/compiler_backend.cc b/compiler/compiler_backend.cc
index eaa39f83c1..0afa665eb7 100644
--- a/compiler/compiler_backend.cc
+++ b/compiler/compiler_backend.cc
@@ -83,6 +83,9 @@ static CompiledMethod* TryCompileWithSeaIR(art::CompilerDriver& compiler,
}
+// Hack for CFI CIE initialization
+extern std::vector<uint8_t>* X86CFIInitialization();
+
class QuickBackend : public CompilerBackend {
public:
QuickBackend() : CompilerBackend(100) {}
@@ -135,10 +138,11 @@ class QuickBackend : public CompilerBackend {
}
bool WriteElf(art::File* file,
- OatWriter& oat_writer,
+ OatWriter* oat_writer,
const std::vector<const art::DexFile*>& dex_files,
const std::string& android_root,
bool is_host, const CompilerDriver& driver) const
+ OVERRIDE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return art::ElfWriterQuick::Create(file, oat_writer, dex_files, android_root, is_host, driver);
}
@@ -165,11 +169,27 @@ class QuickBackend : public CompilerBackend {
bool set_max = cu->mir_graph->SetMaxAvailableNonSpecialCompilerTemps(max_temps);
CHECK(set_max);
}
- return mir_to_lir;;
+ return mir_to_lir;
}
void InitCompilationUnit(CompilationUnit& cu) const {}
+ /*
+ * @brief Generate and return DWARF CFI initialization, if supported by the
+ * backend.
+ * @param driver CompilerDriver for this compile.
+ * @returns A vector of bytes holding the DWARF CFI initialization, or nullptr
+ * if the backend does not support it.
+ * @note This is used for backtrace information in generated code.
+ */
+ std::vector<uint8_t>* GetCallFrameInformationInitialization(const CompilerDriver& driver) const
+ OVERRIDE {
+ if (driver.GetInstructionSet() == kX86) {
+ return X86CFIInitialization();
+ }
+ return nullptr;
+ }
+
private:
DISALLOW_COPY_AND_ASSIGN(QuickBackend);
};
@@ -249,11 +269,12 @@ class LLVMBackend : public CompilerBackend {
}
bool WriteElf(art::File* file,
- OatWriter& oat_writer,
+ OatWriter* oat_writer,
const std::vector<const art::DexFile*>& dex_files,
const std::string& android_root,
bool is_host, const CompilerDriver& driver) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ OVERRIDE
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return art::ElfWriterMclinker::Create(
file, oat_writer, dex_files, android_root, is_host, driver);
}
@@ -271,15 +292,17 @@ class LLVMBackend : public CompilerBackend {
(1 << kSuppressExceptionEdges);
}
- bool isPortable() const { return true; }
+ bool IsPortable() const OVERRIDE {
+ return true;
+ }
- void SetBitcodeFileName(std::string const& filename) {
- typedef void (*SetBitcodeFileNameFn)(CompilerDriver&, std::string const&);
+ void SetBitcodeFileName(const CompilerDriver& driver, const std::string& filename) {
+ typedef void (*SetBitcodeFileNameFn)(const CompilerDriver&, const std::string&);
SetBitcodeFileNameFn set_bitcode_file_name =
reinterpret_cast<SetBitcodeFileNameFn>(compilerLLVMSetBitcodeFileName);
- set_bitcode_file_name(*this, filename);
+ set_bitcode_file_name(driver, filename);
}
private:
diff --git a/compiler/compiler_backend.h b/compiler/compiler_backend.h
index 01a69afc89..b473806bba 100644
--- a/compiler/compiler_backend.h
+++ b/compiler/compiler_backend.h
@@ -23,7 +23,7 @@
namespace art {
class Backend;
-class CompilationUnit;
+struct CompilationUnit;
class CompilerDriver;
class CompiledMethod;
class MIRGraph;
@@ -40,8 +40,9 @@ class CompilerBackend {
kPortable
};
- explicit CompilerBackend(int warning)
- : maximum_compilation_time_before_warning_(warning) {}
+ explicit CompilerBackend(uint64_t warning)
+ : maximum_compilation_time_before_warning_(warning) {
+ }
static CompilerBackend* Create(Kind kind);
@@ -49,7 +50,7 @@ class CompilerBackend {
virtual void UnInit(CompilerDriver& driver) const = 0;
- virtual CompiledMethod* Compile(CompilerDriver& compiler,
+ virtual CompiledMethod* Compile(CompilerDriver& driver,
const DexFile::CodeItem* code_item,
uint32_t access_flags,
InvokeType invoke_type,
@@ -66,7 +67,7 @@ class CompilerBackend {
virtual uintptr_t GetEntryPointOf(mirror::ArtMethod* method) const = 0;
virtual bool WriteElf(art::File* file,
- OatWriter& oat_writer,
+ OatWriter* oat_writer,
const std::vector<const art::DexFile*>& dex_files,
const std::string& android_root,
bool is_host, const CompilerDriver& driver) const
@@ -79,8 +80,12 @@ class CompilerBackend {
return maximum_compilation_time_before_warning_;
}
- virtual bool IsPortable() const { return false; }
- void SetBitcodeFileName(std::string const& filename) {
+ virtual bool IsPortable() const {
+ return false;
+ }
+
+ void SetBitcodeFileName(const CompilerDriver& driver, const std::string& filename) {
+ UNUSED(driver);
UNUSED(filename);
}
@@ -88,8 +93,21 @@ class CompilerBackend {
virtual ~CompilerBackend() {}
+ /*
+ * @brief Generate and return DWARF CFI initialization, if supported by the
+ * backend.
+ * @param driver CompilerDriver for this compile.
+ * @returns A vector of bytes holding the DWARF CFI initialization, or nullptr
+ * if the backend does not support it.
+ * @note This is used for backtrace information in generated code.
+ */
+ virtual std::vector<uint8_t>* GetCallFrameInformationInitialization(const CompilerDriver& driver)
+ const {
+ return nullptr;
+ }
+
private:
- uint64_t maximum_compilation_time_before_warning_;
+ const uint64_t maximum_compilation_time_before_warning_;
DISALLOW_COPY_AND_ASSIGN(CompilerBackend);
};
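
GetCallFrameInformationInitialization is a backend hook: the base class returns nullptr and QuickBackend overrides it for x86 only. A hypothetical caller-side sketch (not the actual ElfWriterQuick code) of how the returned CIE bytes would seed a .debug_frame buffer:

    #include <cstdint>
    #include <vector>

    // nullptr means the backend does not emit CFI; otherwise the CIE bytes come
    // first, and each CompiledMethod's FDE (GetCFIInfo()) is appended after them.
    void SeedDebugFrame(const std::vector<uint8_t>* cie,
                        std::vector<uint8_t>* debug_frame) {
      if (cie != nullptr) {
        debug_frame->insert(debug_frame->end(), cie->begin(), cie->end());
      }
    }
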
diff --git a/compiler/dex/bb_optimizations.h b/compiler/dex/bb_optimizations.h
index 1286a8e52e..bd7c40ba5b 100644
--- a/compiler/dex/bb_optimizations.h
+++ b/compiler/dex/bb_optimizations.h
@@ -23,6 +23,20 @@
namespace art {
/**
+ * @class CacheFieldLoweringInfo
+ * @brief Cache the lowering info for fields used by IGET/IPUT/SGET/SPUT insns.
+ */
+class CacheFieldLoweringInfo : public Pass {
+ public:
+ CacheFieldLoweringInfo() : Pass("CacheFieldLoweringInfo", kNoNodes) {
+ }
+
+ void Start(CompilationUnit* cUnit) const {
+ cUnit->mir_graph->DoCacheFieldLoweringInfo();
+ }
+};
+
+/**
* @class CodeLayout
* @brief Perform the code layout pass.
*/
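
CacheFieldLoweringInfo is a kNoNodes pass: all of its work happens once per compilation unit in Start() rather than in a per-basic-block walk. A stripped-down sketch of that shape (the real base class in compiler/dex/pass.h carries more hooks; names here are simplified):

    struct CompilationUnitStub {};  // stand-in for art::CompilationUnit

    class NoNodesPassSketch {
     public:
      explicit NoNodesPassSketch(const char* name) : name_(name) {}
      virtual ~NoNodesPassSketch() {}

      // All work happens here, once per unit; a kNoNodes pass has no per-block worker.
      virtual void Start(CompilationUnitStub* c_unit) const = 0;

     private:
      const char* const name_;  // pass name, useful for logging/tracing.
    };
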
diff --git a/compiler/dex/compiler_ir.h b/compiler/dex/compiler_ir.h
index 8447d23ddc..ee880417ac 100644
--- a/compiler/dex/compiler_ir.h
+++ b/compiler/dex/compiler_ir.h
@@ -63,8 +63,9 @@ struct CompilationUnit {
bool verbose;
const CompilerBackend* compiler_backend;
InstructionSet instruction_set;
+ bool target64;
- const InstructionSetFeatures& GetInstructionSetFeatures() {
+ InstructionSetFeatures GetInstructionSetFeatures() {
return compiler_driver->GetInstructionSetFeatures();
}
// TODO: much of this info available elsewhere. Go to the original source?
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index ff8fea0f88..b9f9437c95 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -208,21 +208,21 @@ void DexCompiler::CompileInstanceFieldAccess(Instruction* inst,
return;
}
uint32_t field_idx = inst->VRegC_22c();
- int field_offset;
+ MemberOffset field_offset(0u);
bool is_volatile;
bool fast_path = driver_.ComputeInstanceFieldInfo(field_idx, &unit_, is_put,
&field_offset, &is_volatile);
- if (fast_path && !is_volatile && IsUint(16, field_offset)) {
+ if (fast_path && !is_volatile && IsUint(16, field_offset.Int32Value())) {
VLOG(compiler) << "Quickening " << Instruction::Name(inst->Opcode())
<< " to " << Instruction::Name(new_opcode)
<< " by replacing field index " << field_idx
- << " by field offset " << field_offset
+ << " by field offset " << field_offset.Int32Value()
<< " at dex pc " << StringPrintf("0x%x", dex_pc) << " in method "
<< PrettyMethod(unit_.GetDexMethodIndex(), GetDexFile(), true);
// We are modifying 4 consecutive bytes.
inst->SetOpcode(new_opcode);
// Replace field index by field offset.
- inst->SetVRegC_22c(static_cast<uint16_t>(field_offset));
+ inst->SetVRegC_22c(static_cast<uint16_t>(field_offset.Int32Value()));
}
}
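
The quickened opcode stores the field offset in the 16-bit vC slot of the 22c instruction format, which is why the offset must pass IsUint(16, ...). A self-contained restatement of that guard (a sketch mirroring the condition above, not the real ART helpers):

    #include <cstdint>

    // A field access can be quickened only when it resolved via the fast path,
    // is non-volatile, and its byte offset fits the 16-bit vC operand.
    inline bool CanQuickenFieldAccess(bool fast_path, bool is_volatile,
                                      int32_t field_offset) {
      bool fits_u16 = field_offset >= 0 && field_offset <= 0xFFFF;
      return fast_path && !is_volatile && fits_u16;
    }
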
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index 6800f7b2a4..b55b4715eb 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -128,7 +128,7 @@ void CompilationUnit::EndTiming() {
}
}
-static CompiledMethod* CompileMethod(CompilerDriver& compiler,
+static CompiledMethod* CompileMethod(CompilerDriver& driver,
CompilerBackend* compiler_backend,
const DexFile::CodeItem* code_item,
uint32_t access_flags, InvokeType invoke_type,
@@ -143,12 +143,14 @@ static CompiledMethod* CompileMethod(CompilerDriver& compiler,
}
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- CompilationUnit cu(&compiler.GetArenaPool());
+ CompilationUnit cu(driver.GetArenaPool());
- cu.compiler_driver = &compiler;
+ cu.compiler_driver = &driver;
cu.class_linker = class_linker;
- cu.instruction_set = compiler.GetInstructionSet();
+ cu.instruction_set = driver.GetInstructionSet();
+ cu.target64 = cu.instruction_set == kX86_64;
cu.compiler_backend = compiler_backend;
+ // TODO: x86_64 is not yet implemented.
DCHECK((cu.instruction_set == kThumb2) ||
(cu.instruction_set == kX86) ||
(cu.instruction_set == kMips));
@@ -216,8 +218,8 @@ static CompiledMethod* CompileMethod(CompilerDriver& compiler,
}
/* Create the pass driver and launch it */
- PassDriver driver(&cu);
- driver.Launch();
+ PassDriver pass_driver(&cu);
+ pass_driver.Launch();
if (cu.enable_debug & (1 << kDebugDumpCheckStats)) {
cu.mir_graph->DumpCheckStats();
@@ -257,9 +259,9 @@ static CompiledMethod* CompileMethod(CompilerDriver& compiler,
}
cu.EndTiming();
- compiler.GetTimingsLogger().Start();
- compiler.GetTimingsLogger().AddLogger(cu.timings);
- compiler.GetTimingsLogger().End();
+ driver.GetTimingsLogger()->Start();
+ driver.GetTimingsLogger()->AddLogger(cu.timings);
+ driver.GetTimingsLogger()->End();
return result;
}
diff --git a/compiler/dex/frontend.h b/compiler/dex/frontend.h
index 8ce12067ee..22a7b8cfb0 100644
--- a/compiler/dex/frontend.h
+++ b/compiler/dex/frontend.h
@@ -105,7 +105,7 @@ class LLVMInfo {
UniquePtr<art::llvm::IRBuilder> ir_builder_;
};
-struct CompiledMethod;
+class CompiledMethod;
class CompilerDriver;
} // namespace art
diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc
index a3ea034902..61c676784f 100644
--- a/compiler/dex/local_value_numbering.cc
+++ b/compiler/dex/local_value_numbering.cc
@@ -16,6 +16,7 @@
#include "local_value_numbering.h"
+#include "mir_field_info.h"
#include "mir_graph.h"
namespace art {
@@ -534,16 +535,24 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
case Instruction::IGET_BYTE:
case Instruction::IGET_CHAR:
case Instruction::IGET_SHORT: {
+ uint16_t type = opcode - Instruction::IGET;
uint16_t base = GetOperandValue(mir->ssa_rep->uses[0]);
HandleNullCheck(mir, base);
+ const MirFieldInfo& field_info = cu_->mir_graph->GetIFieldLoweringInfo(mir);
uint16_t memory_version;
uint16_t field_id;
- // TODO: all gets treated as volatile.
- // Volatile fields always get a new memory version; field id is irrelevant.
- // Unresolved fields are always marked as volatile and handled the same way here.
- field_id = 0u;
- memory_version = next_memory_version_;
- ++next_memory_version_;
+ if (!field_info.IsResolved() || field_info.IsVolatile()) {
+ // Volatile fields always get a new memory version; field id is irrelevant.
+ // Unresolved fields may be volatile, so handle them as such to be safe.
+ field_id = 0u;
+ memory_version = next_memory_version_;
+ ++next_memory_version_;
+ } else {
+ DCHECK(field_info.IsResolved());
+ field_id = GetFieldId(field_info.DeclaringDexFile(), field_info.DeclaringFieldIndex());
+ memory_version = std::max(unresolved_ifield_version_[type],
+ GetMemoryVersion(base, field_id, type));
+ }
if (opcode == Instruction::IGET_WIDE) {
res = LookupValue(Instruction::IGET_WIDE, base, field_id, memory_version);
SetOperandValueWide(mir->ssa_rep->defs[0], res);
@@ -567,10 +576,18 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
int base_reg = (opcode == Instruction::IPUT_WIDE) ? 2 : 1;
uint16_t base = GetOperandValue(mir->ssa_rep->uses[base_reg]);
HandleNullCheck(mir, base);
- // TODO: all puts treated as unresolved.
- // Unresolved fields always alias with everything of the same type.
- unresolved_ifield_version_[type] = next_memory_version_;
- ++next_memory_version_;
+ const MirFieldInfo& field_info = cu_->mir_graph->GetIFieldLoweringInfo(mir);
+ if (!field_info.IsResolved()) {
+ // Unresolved fields always alias with everything of the same type.
+ unresolved_ifield_version_[type] = next_memory_version_;
+ ++next_memory_version_;
+ } else if (field_info.IsVolatile()) {
+ // Nothing to do, resolved volatile fields always get a new memory version anyway and
+ // can't alias with resolved non-volatile fields.
+ } else {
+ AdvanceMemoryVersion(base, GetFieldId(field_info.DeclaringDexFile(),
+ field_info.DeclaringFieldIndex()), type);
+ }
}
break;
@@ -581,14 +598,22 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
case Instruction::SGET_BYTE:
case Instruction::SGET_CHAR:
case Instruction::SGET_SHORT: {
+ uint16_t type = opcode - Instruction::SGET;
+ const MirFieldInfo& field_info = cu_->mir_graph->GetSFieldLoweringInfo(mir);
uint16_t memory_version;
uint16_t field_id;
- // TODO: all gets treated as volatile.
- // Volatile fields always get a new memory version; field id is irrelevant.
- // Unresolved fields are always marked as volatile and handled the same way here.
- field_id = 0u;
- memory_version = next_memory_version_;
- ++next_memory_version_;
+ if (!field_info.IsResolved() || field_info.IsVolatile()) {
+ // Volatile fields always get a new memory version; field id is irrelevant.
+ // Unresolved fields may be volatile, so handle them as such to be safe.
+ field_id = 0u;
+ memory_version = next_memory_version_;
+ ++next_memory_version_;
+ } else {
+ DCHECK(field_info.IsResolved());
+ field_id = GetFieldId(field_info.DeclaringDexFile(), field_info.DeclaringFieldIndex());
+ memory_version = std::max(unresolved_sfield_version_[type],
+ GetMemoryVersion(NO_VALUE, field_id, type));
+ }
if (opcode == Instruction::SGET_WIDE) {
res = LookupValue(Instruction::SGET_WIDE, NO_VALUE, field_id, memory_version);
SetOperandValueWide(mir->ssa_rep->defs[0], res);
@@ -609,10 +634,18 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
case Instruction::SPUT_CHAR:
case Instruction::SPUT_SHORT: {
uint16_t type = opcode - Instruction::SPUT;
- // TODO: all puts treated as unresolved.
- // Unresolved fields always alias with everything of the same type.
- unresolved_sfield_version_[type] = next_memory_version_;
- ++next_memory_version_;
+ const MirFieldInfo& field_info = cu_->mir_graph->GetSFieldLoweringInfo(mir);
+ if (!field_info.IsResolved()) {
+ // Unresolved fields always alias with everything of the same type.
+ unresolved_sfield_version_[type] = next_memory_version_;
+ ++next_memory_version_;
+ } else if (field_info.IsVolatile()) {
+ // Nothing to do, resolved volatile fields always get a new memory version anyway and
+ // can't alias with resolved non-volatile fields.
+ } else {
+ AdvanceMemoryVersion(NO_VALUE, GetFieldId(field_info.DeclaringDexFile(),
+ field_info.DeclaringFieldIndex()), type);
+ }
}
break;
}
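
The scheme above keys a load's value number on (base, field, memory version) and bumps the version on any aliasing store; volatile or unresolved accesses take a fresh version every time, so they never match. A minimal standalone sketch of that idea (hypothetical helper, not the LocalValueNumbering API):

    #include <cstdint>
    #include <map>
    #include <tuple>
    #include <utility>

    struct ValueNumbererSketch {
      uint16_t next_value_ = 1;
      std::map<std::tuple<uint16_t, uint16_t, uint16_t>, uint16_t> values_;
      std::map<std::pair<uint16_t, uint16_t>, uint16_t> versions_;

      uint16_t Load(uint16_t base, uint16_t field) {
        uint16_t version = versions_[{base, field}];
        auto key = std::make_tuple(base, field, version);
        auto it = values_.find(key);
        if (it == values_.end()) {
          it = values_.emplace(key, next_value_++).first;  // first load: new value.
        }
        return it->second;  // repeated load, same version: same value, CSE-able.
      }

      void Store(uint16_t base, uint16_t field) {
        ++versions_[{base, field}];  // invalidate prior loads of this field.
      }
    };
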
diff --git a/compiler/dex/local_value_numbering_test.cc b/compiler/dex/local_value_numbering_test.cc
index 6ab6c51a1f..4599612db6 100644
--- a/compiler/dex/local_value_numbering_test.cc
+++ b/compiler/dex/local_value_numbering_test.cc
@@ -44,7 +44,7 @@ class LocalValueNumberingTest : public testing::Test {
Instruction::Code opcode;
int64_t value;
- uint32_t field_annotation;
+ uint32_t field_info;
size_t num_uses;
int32_t uses[kMaxSsaUses];
size_t num_defs;
@@ -55,28 +55,41 @@ class LocalValueNumberingTest : public testing::Test {
{ opcode, value, 0u, 0, { }, 1, { reg } }
#define DEF_CONST_WIDE(opcode, reg, value) \
{ opcode, value, 0u, 0, { }, 2, { reg, reg + 1 } }
-#define DEF_IGET(opcode, reg, obj, field_annotation) \
- { opcode, 0u, field_annotation, 1, { obj }, 1, { reg } }
-#define DEF_IGET_WIDE(opcode, reg, obj, field_annotation) \
- { opcode, 0u, field_annotation, 1, { obj }, 2, { reg, reg + 1 } }
-#define DEF_IPUT(opcode, reg, obj, field_annotation) \
- { opcode, 0u, field_annotation, 2, { reg, obj }, 0, { } }
-#define DEF_IPUT_WIDE(opcode, reg, obj, field_annotation) \
- { opcode, 0u, field_annotation, 3, { reg, reg + 1, obj }, 0, { } }
-#define DEF_SGET(opcode, reg, field_annotation) \
- { opcode, 0u, field_annotation, 0, { }, 1, { reg } }
-#define DEF_SGET_WIDE(opcode, reg, field_annotation) \
- { opcode, 0u, field_annotation, 0, { }, 2, { reg, reg + 1 } }
-#define DEF_SPUT(opcode, reg, field_annotation) \
- { opcode, 0u, field_annotation, 1, { reg }, 0, { } }
-#define DEF_SPUT_WIDE(opcode, reg, field_annotation) \
- { opcode, 0u, field_annotation, 2, { reg, reg + 1 }, 0, { } }
+#define DEF_IGET(opcode, reg, obj, field_info) \
+ { opcode, 0u, field_info, 1, { obj }, 1, { reg } }
+#define DEF_IGET_WIDE(opcode, reg, obj, field_info) \
+ { opcode, 0u, field_info, 1, { obj }, 2, { reg, reg + 1 } }
+#define DEF_IPUT(opcode, reg, obj, field_info) \
+ { opcode, 0u, field_info, 2, { reg, obj }, 0, { } }
+#define DEF_IPUT_WIDE(opcode, reg, obj, field_info) \
+ { opcode, 0u, field_info, 3, { reg, reg + 1, obj }, 0, { } }
+#define DEF_SGET(opcode, reg, field_info) \
+ { opcode, 0u, field_info, 0, { }, 1, { reg } }
+#define DEF_SGET_WIDE(opcode, reg, field_info) \
+ { opcode, 0u, field_info, 0, { }, 2, { reg, reg + 1 } }
+#define DEF_SPUT(opcode, reg, field_info) \
+ { opcode, 0u, field_info, 1, { reg }, 0, { } }
+#define DEF_SPUT_WIDE(opcode, reg, field_info) \
+ { opcode, 0u, field_info, 2, { reg, reg + 1 }, 0, { } }
#define DEF_INVOKE1(opcode, reg) \
{ opcode, 0u, 0u, 1, { reg }, 0, { } }
#define DEF_UNIQUE_REF(opcode, reg) \
{ opcode, 0u, 0u, 0, { }, 1, { reg } } // CONST_CLASS, CONST_STRING, NEW_ARRAY, ...
void DoPrepareIFields(const IFieldDef* defs, size_t count) {
+ cu_.mir_graph->ifield_lowering_infos_.Reset();
+ cu_.mir_graph->ifield_lowering_infos_.Resize(count);
+ for (size_t i = 0u; i != count; ++i) {
+ const IFieldDef* def = &defs[i];
+ MirIFieldLoweringInfo field_info(def->field_idx);
+ if (def->declaring_dex_file != 0u) {
+ field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
+ field_info.declaring_field_idx_ = def->declaring_field_idx;
+ field_info.flags_ = 0u | // Without kFlagIsStatic.
+ (def->is_volatile ? MirIFieldLoweringInfo::kFlagIsVolatile : 0u);
+ }
+ cu_.mir_graph->ifield_lowering_infos_.Insert(field_info);
+ }
}
template <size_t count>
@@ -85,6 +98,19 @@ class LocalValueNumberingTest : public testing::Test {
}
void DoPrepareSFields(const SFieldDef* defs, size_t count) {
+ cu_.mir_graph->sfield_lowering_infos_.Reset();
+ cu_.mir_graph->sfield_lowering_infos_.Resize(count);
+ for (size_t i = 0u; i != count; ++i) {
+ const SFieldDef* def = &defs[i];
+ MirSFieldLoweringInfo field_info(def->field_idx);
+ if (def->declaring_dex_file != 0u) {
+ field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
+ field_info.declaring_field_idx_ = def->declaring_field_idx;
+ field_info.flags_ = MirSFieldLoweringInfo::kFlagIsStatic |
+ (def->is_volatile ? MirSFieldLoweringInfo::kFlagIsVolatile : 0u);
+ }
+ cu_.mir_graph->sfield_lowering_infos_.Insert(field_info);
+ }
}
template <size_t count>
@@ -102,6 +128,13 @@ class LocalValueNumberingTest : public testing::Test {
mir->dalvikInsn.opcode = def->opcode;
mir->dalvikInsn.vB = static_cast<int32_t>(def->value);
mir->dalvikInsn.vB_wide = def->value;
+ if (def->opcode >= Instruction::IGET && def->opcode <= Instruction::IPUT_SHORT) {
+ ASSERT_LT(def->field_info, cu_.mir_graph->ifield_lowering_infos_.Size());
+ mir->meta.ifield_lowering_info = def->field_info;
+ } else if (def->opcode >= Instruction::SGET && def->opcode <= Instruction::SPUT_SHORT) {
+ ASSERT_LT(def->field_info, cu_.mir_graph->sfield_lowering_infos_.Size());
+ mir->meta.sfield_lowering_info = def->field_info;
+ }
mir->ssa_rep = &ssa_reps_[i];
mir->ssa_rep->num_uses = def->num_uses;
mir->ssa_rep->uses = const_cast<int32_t*>(def->uses); // Not modified by LVN.
@@ -146,7 +179,6 @@ class LocalValueNumberingTest : public testing::Test {
LocalValueNumbering lvn_;
};
-#if 0 // TODO: re-enable when LVN is handling memory igets.
TEST_F(LocalValueNumberingTest, TestIGetIGetInvokeIGet) {
static const IFieldDef ifields[] = {
{ 1u, 1u, 1u, false }
@@ -169,7 +201,6 @@ TEST_F(LocalValueNumberingTest, TestIGetIGetInvokeIGet) {
EXPECT_EQ(mirs_[2].optimization_flags, 0u);
EXPECT_EQ(mirs_[3].optimization_flags, MIR_IGNORE_NULL_CHECK);
}
-#endif
TEST_F(LocalValueNumberingTest, TestIGetIPutIGetIGetIGet) {
static const IFieldDef ifields[] = {
@@ -197,7 +228,6 @@ TEST_F(LocalValueNumberingTest, TestIGetIPutIGetIGetIGet) {
EXPECT_EQ(mirs_[4].optimization_flags, 0u);
}
-#if 0 // TODO: re-enable when LVN is handling memory igets.
TEST_F(LocalValueNumberingTest, TestUniquePreserve1) {
static const IFieldDef ifields[] = {
{ 1u, 1u, 1u, false },
@@ -218,9 +248,7 @@ TEST_F(LocalValueNumberingTest, TestUniquePreserve1) {
EXPECT_EQ(mirs_[2].optimization_flags, 0u);
EXPECT_EQ(mirs_[3].optimization_flags, MIR_IGNORE_NULL_CHECK);
}
-#endif
-#if 0 // TODO: re-enable when LVN is handling memory igets.
TEST_F(LocalValueNumberingTest, TestUniquePreserve2) {
static const IFieldDef ifields[] = {
{ 1u, 1u, 1u, false },
@@ -241,9 +269,7 @@ TEST_F(LocalValueNumberingTest, TestUniquePreserve2) {
EXPECT_EQ(mirs_[2].optimization_flags, MIR_IGNORE_NULL_CHECK);
EXPECT_EQ(mirs_[3].optimization_flags, MIR_IGNORE_NULL_CHECK);
}
-#endif
-#if 0 // TODO: re-enable when LVN is handling memory igets.
TEST_F(LocalValueNumberingTest, TestUniquePreserveAndEscape) {
static const IFieldDef ifields[] = {
{ 1u, 1u, 1u, false },
@@ -267,7 +293,6 @@ TEST_F(LocalValueNumberingTest, TestUniquePreserveAndEscape) {
EXPECT_EQ(mirs_[3].optimization_flags, MIR_IGNORE_NULL_CHECK);
EXPECT_EQ(mirs_[5].optimization_flags, MIR_IGNORE_NULL_CHECK);
}
-#endif
TEST_F(LocalValueNumberingTest, TestVolatile) {
static const IFieldDef ifields[] = {
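
In DoPrepareIFields/DoPrepareSFields above, the fixture fills declaring_dex_file_ by casting small integers to pointers: only distinctness matters, since the LVN code compares but never dereferences them. The trick in isolation (sketch, valid only under that no-dereference assumption):

    #include <cstdint>

    struct OpaqueDexFile;  // never defined; used purely as a unique identity.

    inline const OpaqueDexFile* FakeDexFile(uintptr_t id) {
      // id 0 is reserved to mean "unresolved" (no declaring dex file).
      return reinterpret_cast<const OpaqueDexFile*>(id);
    }
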
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index 7ce8f696be..d159f49b3e 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -14,11 +14,15 @@
* limitations under the License.
*/
+#include <algorithm>
#include "compiler_internals.h"
#include "dataflow_iterator-inl.h"
+#include "dex_instruction.h"
+#include "dex_instruction-inl.h"
#include "dex/quick/dex_file_method_inliner.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "driver/compiler_options.h"
+#include "UniquePtr.h"
namespace art {
@@ -1004,6 +1008,11 @@ bool MIRGraph::SkipCompilation() {
return false;
}
+ // Contains a pattern we don't want to compile?
+ if (punt_to_interpreter_) {
+ return true;
+ }
+
if (compiler_filter == CompilerOptions::kInterpretOnly) {
LOG(WARNING) << "InterpretOnly should ideally be filtered out prior to parsing.";
return true;
@@ -1085,4 +1094,109 @@ bool MIRGraph::SkipCompilation() {
return ComputeSkipCompilation(&stats, skip_compilation);
}
+void MIRGraph::DoCacheFieldLoweringInfo() {
+ // Try to use stack-allocated array, resort to heap if we exceed the initial size.
+ static constexpr size_t kInitialSize = 32;
+ uint16_t stack_idxs[kInitialSize];
+ UniquePtr<uint16_t[]> allocated_idxs;
+ uint16_t* field_idxs = stack_idxs;
+ size_t size = kInitialSize;
+
+ // Find IGET/IPUT/SGET/SPUT insns, store IGET/IPUT fields at the beginning, SGET/SPUT at the end.
+ size_t ifield_pos = 0u;
+ size_t sfield_pos = size;
+ AllNodesIterator iter(this);
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
+ if (bb->block_type != kDalvikByteCode) {
+ continue;
+ }
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
+ if (mir->dalvikInsn.opcode >= Instruction::IGET &&
+ mir->dalvikInsn.opcode <= Instruction::SPUT_SHORT) {
+ bool need_alloc = false;
+ const Instruction* insn = Instruction::At(current_code_item_->insns_ + mir->offset);
+ uint16_t field_idx;
+ // Get field index and try to find it among existing indexes. If found, it's usually among
+ // the last few added, so we'll start the search from ifield_pos/sfield_pos. Though this
+      // is a linear search, it performs much better in practice than a map-based approach.
+ if (mir->dalvikInsn.opcode <= Instruction::IPUT_SHORT) {
+ field_idx = insn->VRegC_22c();
+ size_t i = ifield_pos;
+ while (i != 0u && field_idxs[i - 1] != field_idx) {
+ --i;
+ }
+ if (i != 0u) {
+ mir->meta.ifield_lowering_info = i - 1;
+ } else {
+ mir->meta.ifield_lowering_info = ifield_pos;
+ if (UNLIKELY(ifield_pos == sfield_pos)) {
+ need_alloc = true;
+ } else {
+ field_idxs[ifield_pos++] = field_idx;
+ }
+ }
+ } else {
+ field_idx = insn->VRegB_21c();
+ size_t i = sfield_pos;
+ while (i != size && field_idxs[i] != field_idx) {
+ ++i;
+ }
+ if (i != size) {
+ mir->meta.sfield_lowering_info = size - i - 1u;
+ } else {
+ mir->meta.sfield_lowering_info = size - sfield_pos;
+ if (UNLIKELY(ifield_pos == sfield_pos)) {
+ need_alloc = true;
+ } else {
+ field_idxs[--sfield_pos] = field_idx;
+ }
+ }
+ }
+ if (UNLIKELY(need_alloc)) {
+ DCHECK(field_idxs == stack_idxs);
+ // All IGET/IPUT/SGET/SPUT instructions take 2 code units and there must also be a RETURN.
+ uint32_t max_refs = (current_code_item_->insns_size_in_code_units_ - 1u) / 2u;
+ allocated_idxs.reset(new uint16_t[max_refs]);
+ field_idxs = allocated_idxs.get();
+ size_t sfield_count = size - sfield_pos;
+ sfield_pos = max_refs - sfield_count;
+ size = max_refs;
+ memcpy(field_idxs, stack_idxs, ifield_pos * sizeof(field_idxs[0]));
+ memcpy(field_idxs + sfield_pos, stack_idxs + ifield_pos,
+ sfield_count * sizeof(field_idxs[0]));
+ if (mir->dalvikInsn.opcode <= Instruction::IPUT_SHORT) {
+ field_idxs[ifield_pos++] = field_idx;
+ } else {
+ field_idxs[--sfield_pos] = field_idx;
+ }
+ }
+ DCHECK_LE(ifield_pos, sfield_pos);
+ }
+ }
+ }
+
+ if (ifield_pos != 0u) {
+ // Resolve instance field infos.
+ DCHECK_EQ(ifield_lowering_infos_.Size(), 0u);
+ ifield_lowering_infos_.Resize(ifield_pos);
+ for (size_t pos = 0u; pos != ifield_pos; ++pos) {
+ ifield_lowering_infos_.Insert(MirIFieldLoweringInfo(field_idxs[pos]));
+ }
+ MirIFieldLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
+ ifield_lowering_infos_.GetRawStorage(), ifield_pos);
+ }
+
+ if (sfield_pos != size) {
+ // Resolve static field infos.
+ DCHECK_EQ(sfield_lowering_infos_.Size(), 0u);
+ sfield_lowering_infos_.Resize(size - sfield_pos);
+ for (size_t pos = size; pos != sfield_pos;) {
+ --pos;
+ sfield_lowering_infos_.Insert(MirSFieldLoweringInfo(field_idxs[pos]));
+ }
+ MirSFieldLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
+ sfield_lowering_infos_.GetRawStorage(), size - sfield_pos);
+ }
+}
+
} // namespace art
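
DoCacheFieldLoweringInfo packs both field-index lists into a single buffer: instance field indexes grow from the front, static field indexes from the back, and the buffer is full exactly when the two cursors meet. The underlying technique as a standalone sketch (hypothetical helper):

    #include <cstddef>
    #include <cstdint>

    struct TwoEndedBuffer {
      uint16_t* data;
      size_t size;
      size_t front;  // next free slot for the front-growing list.
      size_t back;   // back-growing list occupies [back, size).

      TwoEndedBuffer(uint16_t* buf, size_t n)
          : data(buf), size(n), front(0u), back(n) {}

      // Both pushes fail on the same condition: the two lists have met.
      bool PushFront(uint16_t v) {
        if (front == back) return false;  // caller must reallocate and copy.
        data[front++] = v;
        return true;
      }
      bool PushBack(uint16_t v) {
        if (front == back) return false;
        data[--back] = v;
        return true;
      }
    };
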
diff --git a/compiler/dex/mir_field_info.cc b/compiler/dex/mir_field_info.cc
new file mode 100644
index 0000000000..96eda01d1e
--- /dev/null
+++ b/compiler/dex/mir_field_info.cc
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "mir_field_info.h"
+
+#include <string.h>
+
+#include "base/logging.h"
+#include "driver/compiler_driver.h"
+#include "driver/compiler_driver-inl.h"
+#include "mirror/class_loader.h" // Only to allow casts in SirtRef<ClassLoader>.
+#include "mirror/dex_cache.h" // Only to allow casts in SirtRef<DexCache>.
+#include "scoped_thread_state_change.h"
+#include "sirt_ref.h"
+
+namespace art {
+
+void MirIFieldLoweringInfo::Resolve(CompilerDriver* compiler_driver,
+ const DexCompilationUnit* mUnit,
+ MirIFieldLoweringInfo* field_infos, size_t count) {
+ if (kIsDebugBuild) {
+ DCHECK(field_infos != nullptr);
+ DCHECK_NE(count, 0u);
+ for (auto it = field_infos, end = field_infos + count; it != end; ++it) {
+ MirIFieldLoweringInfo unresolved(it->field_idx_);
+ DCHECK_EQ(memcmp(&unresolved, &*it, sizeof(*it)), 0);
+ }
+ }
+
+ // We're going to resolve fields and check access in a tight loop. It's better to hold
+ // the lock and needed references once than re-acquiring them again and again.
+ ScopedObjectAccess soa(Thread::Current());
+ SirtRef<mirror::DexCache> dex_cache(soa.Self(), compiler_driver->GetDexCache(mUnit));
+ SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
+ compiler_driver->GetClassLoader(soa, mUnit));
+ SirtRef<mirror::Class> referrer_class(soa.Self(),
+ compiler_driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit));
+ // Even if the referrer class is unresolved (i.e. we're compiling a method without class
+ // definition) we still want to resolve fields and record all available info.
+
+ for (auto it = field_infos, end = field_infos + count; it != end; ++it) {
+ uint32_t field_idx = it->field_idx_;
+ mirror::ArtField* resolved_field =
+ compiler_driver->ResolveField(soa, dex_cache, class_loader, mUnit, field_idx, false);
+ if (UNLIKELY(resolved_field == nullptr)) {
+ continue;
+ }
+ compiler_driver->GetResolvedFieldDexFileLocation(resolved_field,
+ &it->declaring_dex_file_, &it->declaring_class_idx_, &it->declaring_field_idx_);
+ bool is_volatile = compiler_driver->IsFieldVolatile(resolved_field);
+
+ std::pair<bool, bool> fast_path = compiler_driver->IsFastInstanceField(
+ dex_cache.get(), referrer_class.get(), resolved_field, field_idx, &it->field_offset_);
+ it->flags_ = 0u | // Without kFlagIsStatic.
+ (is_volatile ? kFlagIsVolatile : 0u) |
+ (fast_path.first ? kFlagFastGet : 0u) |
+ (fast_path.second ? kFlagFastPut : 0u);
+ }
+}
+
+void MirSFieldLoweringInfo::Resolve(CompilerDriver* compiler_driver,
+ const DexCompilationUnit* mUnit,
+ MirSFieldLoweringInfo* field_infos, size_t count) {
+ if (kIsDebugBuild) {
+ DCHECK(field_infos != nullptr);
+ DCHECK_NE(count, 0u);
+ for (auto it = field_infos, end = field_infos + count; it != end; ++it) {
+ MirSFieldLoweringInfo unresolved(it->field_idx_);
+ // In 64-bit builds, there's padding after storage_index_, don't include it in memcmp.
+ size_t size = OFFSETOF_MEMBER(MirSFieldLoweringInfo, storage_index_) +
+ sizeof(it->storage_index_);
+ DCHECK_EQ(memcmp(&unresolved, &*it, size), 0);
+ }
+ }
+
+ // We're going to resolve fields and check access in a tight loop. It's better to hold
+ // the lock and needed references once than re-acquiring them again and again.
+ ScopedObjectAccess soa(Thread::Current());
+ SirtRef<mirror::DexCache> dex_cache(soa.Self(), compiler_driver->GetDexCache(mUnit));
+ SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
+ compiler_driver->GetClassLoader(soa, mUnit));
+ SirtRef<mirror::Class> referrer_class(soa.Self(),
+ compiler_driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit));
+ // Even if the referrer class is unresolved (i.e. we're compiling a method without class
+ // definition) we still want to resolve fields and record all available info.
+
+ for (auto it = field_infos, end = field_infos + count; it != end; ++it) {
+ uint32_t field_idx = it->field_idx_;
+ mirror::ArtField* resolved_field =
+ compiler_driver->ResolveField(soa, dex_cache, class_loader, mUnit, field_idx, true);
+ if (UNLIKELY(resolved_field == nullptr)) {
+ continue;
+ }
+ compiler_driver->GetResolvedFieldDexFileLocation(resolved_field,
+ &it->declaring_dex_file_, &it->declaring_class_idx_, &it->declaring_field_idx_);
+ bool is_volatile = compiler_driver->IsFieldVolatile(resolved_field) ? 1u : 0u;
+
+ bool is_referrers_class, is_initialized;
+ std::pair<bool, bool> fast_path = compiler_driver->IsFastStaticField(
+ dex_cache.get(), referrer_class.get(), resolved_field, field_idx, &it->field_offset_,
+ &it->storage_index_, &is_referrers_class, &is_initialized);
+ it->flags_ = kFlagIsStatic |
+ (is_volatile ? kFlagIsVolatile : 0u) |
+ (fast_path.first ? kFlagFastGet : 0u) |
+ (fast_path.second ? kFlagFastPut : 0u) |
+ (is_referrers_class ? kFlagIsReferrersClass : 0u) |
+ (is_initialized ? kFlagIsInitialized : 0u);
+ }
+}
+
+} // namespace art
diff --git a/compiler/dex/mir_field_info.h b/compiler/dex/mir_field_info.h
new file mode 100644
index 0000000000..41cb4cee14
--- /dev/null
+++ b/compiler/dex/mir_field_info.h
@@ -0,0 +1,211 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_MIR_FIELD_INFO_H_
+#define ART_COMPILER_DEX_MIR_FIELD_INFO_H_
+
+#include "base/macros.h"
+#include "dex_file.h"
+#include "offsets.h"
+
+namespace art {
+
+class CompilerDriver;
+class DexCompilationUnit;
+
+/*
+ * Field info is calculated from the perspective of the compilation unit that accesses
+ * the field and stored in that unit's MIRGraph. Therefore it does not need to reference the
+ * dex file or method for which it has been calculated. However, we do store the declaring
+ * field index, class index and dex file of the resolved field to help distinguish between fields.
+ */
+
+class MirFieldInfo {
+ public:
+ uint16_t FieldIndex() const {
+ return field_idx_;
+ }
+
+ bool IsStatic() const {
+ return (flags_ & kFlagIsStatic) != 0u;
+ }
+
+ bool IsResolved() const {
+ return declaring_dex_file_ != nullptr;
+ }
+
+ const DexFile* DeclaringDexFile() const {
+ return declaring_dex_file_;
+ }
+
+ uint16_t DeclaringClassIndex() const {
+ return declaring_class_idx_;
+ }
+
+ uint16_t DeclaringFieldIndex() const {
+ return declaring_field_idx_;
+ }
+
+ bool IsVolatile() const {
+ return (flags_ & kFlagIsVolatile) != 0u;
+ }
+
+ protected:
+ enum {
+ kBitIsStatic = 0,
+ kBitIsVolatile,
+ kFieldInfoBitEnd
+ };
+ static constexpr uint16_t kFlagIsVolatile = 1u << kBitIsVolatile;
+ static constexpr uint16_t kFlagIsStatic = 1u << kBitIsStatic;
+
+ MirFieldInfo(uint16_t field_idx, uint16_t flags)
+ : field_idx_(field_idx),
+ flags_(flags),
+ declaring_field_idx_(0u),
+ declaring_class_idx_(0u),
+ declaring_dex_file_(nullptr) {
+ }
+
+ // Make copy-ctor/assign/dtor protected to avoid slicing.
+ MirFieldInfo(const MirFieldInfo& other) = default;
+ MirFieldInfo& operator=(const MirFieldInfo& other) = default;
+ ~MirFieldInfo() = default;
+
+ // The field index in the compiling method's dex file.
+ uint16_t field_idx_;
+ // Flags, for volatility and derived class data.
+ uint16_t flags_;
+ // The field index in the dex file that defines field, 0 if unresolved.
+ uint16_t declaring_field_idx_;
+ // The type index of the class declaring the field, 0 if unresolved.
+ uint16_t declaring_class_idx_;
+ // The dex file that defines the class containing the field and the field, nullptr if unresolved.
+ const DexFile* declaring_dex_file_;
+};
+
+class MirIFieldLoweringInfo : public MirFieldInfo {
+ public:
+ // For each requested instance field retrieve the field's declaring location (dex file, class
+  // index and field index) and volatility and compute whether we can fast path the access
+ // with IGET/IPUT. For fast path fields, retrieve the field offset.
+ static void Resolve(CompilerDriver* compiler_driver, const DexCompilationUnit* mUnit,
+ MirIFieldLoweringInfo* field_infos, size_t count)
+ LOCKS_EXCLUDED(Locks::mutator_lock_);
+
+ // Construct an unresolved instance field lowering info.
+ explicit MirIFieldLoweringInfo(uint16_t field_idx)
+ : MirFieldInfo(field_idx, kFlagIsVolatile), // Without kFlagIsStatic.
+ field_offset_(0u) {
+ }
+
+ bool FastGet() const {
+ return (flags_ & kFlagFastGet) != 0u;
+ }
+
+ bool FastPut() const {
+ return (flags_ & kFlagFastPut) != 0u;
+ }
+
+ MemberOffset FieldOffset() const {
+ return field_offset_;
+ }
+
+ private:
+ enum {
+ kBitFastGet = kFieldInfoBitEnd,
+ kBitFastPut,
+ kIFieldLoweringInfoBitEnd
+ };
+ COMPILE_ASSERT(kIFieldLoweringInfoBitEnd <= 16, too_many_flags);
+ static constexpr uint16_t kFlagFastGet = 1u << kBitFastGet;
+ static constexpr uint16_t kFlagFastPut = 1u << kBitFastPut;
+
+ // The member offset of the field, 0u if unresolved.
+ MemberOffset field_offset_;
+
+ friend class LocalValueNumberingTest;
+};
+
+class MirSFieldLoweringInfo : public MirFieldInfo {
+ public:
+ // For each requested static field retrieve the field's declaring location (dex file, class
+  // index and field index) and volatility and compute whether we can fast path the access with
+  // SGET/SPUT. For fast path fields (at least for SGET), retrieve the information needed for
+ // the field access, i.e. the field offset, whether the field is in the same class as the
+ // method being compiled, whether the declaring class can be safely assumed to be initialized
+ // and the type index of the declaring class in the compiled method's dex file.
+ static void Resolve(CompilerDriver* compiler_driver, const DexCompilationUnit* mUnit,
+ MirSFieldLoweringInfo* field_infos, size_t count)
+ LOCKS_EXCLUDED(Locks::mutator_lock_);
+
+ // Construct an unresolved static field lowering info.
+ explicit MirSFieldLoweringInfo(uint16_t field_idx)
+ : MirFieldInfo(field_idx, kFlagIsVolatile | kFlagIsStatic),
+ field_offset_(0u),
+ storage_index_(DexFile::kDexNoIndex) {
+ }
+
+ bool FastGet() const {
+ return (flags_ & kFlagFastGet) != 0u;
+ }
+
+ bool FastPut() const {
+ return (flags_ & kFlagFastPut) != 0u;
+ }
+
+ bool IsReferrersClass() const {
+ return (flags_ & kFlagIsReferrersClass) != 0u;
+ }
+
+ bool IsInitialized() const {
+ return (flags_ & kFlagIsInitialized) != 0u;
+ }
+
+ MemberOffset FieldOffset() const {
+ return field_offset_;
+ }
+
+ uint32_t StorageIndex() const {
+ return storage_index_;
+ }
+
+ private:
+ enum {
+ kBitFastGet = kFieldInfoBitEnd,
+ kBitFastPut,
+ kBitIsReferrersClass,
+ kBitIsInitialized,
+ kSFieldLoweringInfoBitEnd
+ };
+ COMPILE_ASSERT(kSFieldLoweringInfoBitEnd <= 16, too_many_flags);
+ static constexpr uint16_t kFlagFastGet = 1u << kBitFastGet;
+ static constexpr uint16_t kFlagFastPut = 1u << kBitFastPut;
+ static constexpr uint16_t kFlagIsReferrersClass = 1u << kBitIsReferrersClass;
+ static constexpr uint16_t kFlagIsInitialized = 1u << kBitIsInitialized;
+
+ // The member offset of the field, 0u if unresolved.
+ MemberOffset field_offset_;
+ // The type index of the declaring class in the compiling method's dex file,
+ // -1 if the field is unresolved or there's no appropriate TypeId in that dex file.
+ uint32_t storage_index_;
+
+ friend class LocalValueNumberingTest;
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_DEX_MIR_FIELD_INFO_H_
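
The flags above use a pattern worth noting: MirFieldInfo reserves the low bits and exports kFieldInfoBitEnd, each subclass continues the numbering from there, and a compile-time assert keeps the total within the 16-bit flags_ field. The same pattern in miniature (static_assert standing in for ART's COMPILE_ASSERT macro; names here are illustrative):

    #include <cstdint>

    enum BaseBits { kBitOne = 0, kBitTwo, kBaseBitEnd };
    enum DerivedBits { kBitThree = kBaseBitEnd, kBitFour, kDerivedBitEnd };

    static_assert(kDerivedBitEnd <= 16, "too many flags for a uint16_t");

    static constexpr uint16_t kFlagThree = 1u << kBitThree;  // == 1u << 2
    static constexpr uint16_t kFlagFour = 1u << kBitFour;    // == 1u << 3
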
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index e4550d1e60..46e854fb2b 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -86,7 +86,10 @@ MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
forward_branches_(0),
compiler_temps_(arena, 6, kGrowableArrayMisc),
num_non_special_compiler_temps_(0),
- max_available_non_special_compiler_temps_(0) {
+ max_available_non_special_compiler_temps_(0),
+ punt_to_interpreter_(false),
+ ifield_lowering_infos_(arena, 0u),
+ sfield_lowering_infos_(arena, 0u) {
try_block_addr_ = new (arena_) ArenaBitVector(arena_, 0, true /* expandable */);
max_available_special_compiler_temps_ = std::abs(static_cast<int>(kVRegNonSpecialTempBaseReg))
- std::abs(static_cast<int>(kVRegTempBaseReg));
@@ -610,6 +613,7 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
}
int flags = Instruction::FlagsOf(insn->dalvikInsn.opcode);
+ int verify_flags = Instruction::VerifyFlagsOf(insn->dalvikInsn.opcode);
uint64_t df_flags = oat_data_flow_attributes_[insn->dalvikInsn.opcode];
@@ -676,6 +680,19 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
} else if (flags & Instruction::kSwitch) {
cur_block = ProcessCanSwitch(cur_block, insn, current_offset_, width, flags);
}
+ if (verify_flags & Instruction::kVerifyVarArgRange) {
+ /*
+ * The Quick backend's runtime model includes a gap between a method's
+ * argument ("in") vregs and the rest of its vregs. Handling a range instruction
+ * which spans the gap is somewhat complicated, and should not happen
+ * in normal usage of dx. Punt to the interpreter.
+ */
+ int first_reg_in_range = insn->dalvikInsn.vC;
+ int last_reg_in_range = first_reg_in_range + insn->dalvikInsn.vA - 1;
+ if (IsInVReg(first_reg_in_range) != IsInVReg(last_reg_in_range)) {
+ punt_to_interpreter_ = true;
+ }
+ }
current_offset_ += width;
BasicBlock *next_block = FindBlock(current_offset_, /* split */ false, /* create */
false, /* immed_pred_block_p */ NULL);
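
IsInVReg (added in mir_graph.h below) reports whether a vreg falls in the argument ("in") area, which starts at cu_->num_regs in the Quick frame model. The punt condition above reduces to the range straddling that boundary; a standalone sketch under that layout assumption:

    // Returns true when [first_reg, first_reg + count) crosses the boundary
    // between ordinary vregs (< num_regs) and argument vregs (>= num_regs).
    inline bool RangeStraddlesArgGap(int first_reg, int count, int num_regs) {
      bool first_is_in = first_reg >= num_regs;
      bool last_is_in = (first_reg + count - 1) >= num_regs;
      return first_is_in != last_is_in;
    }
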
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index d344055656..1eb9ef9bef 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -20,8 +20,11 @@
#include "dex_file.h"
#include "dex_instruction.h"
#include "compiler_ir.h"
+#include "mir_field_info.h"
+#include "invoke_type.h"
#include "utils/arena_bit_vector.h"
#include "utils/growable_array.h"
+#include "reg_storage.h"
namespace art {
@@ -165,7 +168,7 @@ enum OatMethodAttributes {
#define INVALID_SREG (-1)
#define INVALID_VREG (0xFFFFU)
-#define INVALID_REG (0xFF)
+#define INVALID_REG (0x7F)
#define INVALID_OFFSET (0xDEADF00FU)
#define MIR_IGNORE_NULL_CHECK (1 << kMIRIgnoreNullCheck)
@@ -256,8 +259,14 @@ struct MIR {
BasicBlockId* phi_incoming;
// Establish link from check instruction (kMirOpCheck) to the actual throwing instruction.
MIR* throw_insn;
- // Fused cmp branch condition.
+ // Branch condition for fused cmp or select.
ConditionCode ccode;
+ // IGET/IPUT lowering info index, points to MIRGraph::ifield_lowering_infos_. Due to the limit
+ // on code units per method (64K) and the 2-unit size of IGET/IPUT insns, this never exceeds 32K.
+ uint32_t ifield_lowering_info;
+ // SGET/SPUT lowering info index, points to MIRGraph::sfield_lowering_infos_. Due to the limit
+ // on code units per method (64K) and the 2-unit size of SGET/SPUT insns, this never exceeds 32K.
+ uint32_t sfield_lowering_info;
} meta;
};
@@ -328,9 +337,8 @@ struct RegLocation {
unsigned ref:1; // Something GC cares about.
unsigned high_word:1; // High word of pair?
unsigned home:1; // Does this represent the home location?
- VectorLengthType vec_len:3; // Is this value in a vector register, and how big is it?
- uint8_t low_reg; // First physical register.
- uint8_t high_reg; // 2nd physical register (if wide).
+ VectorLengthType vec_len:3; // TODO: remove. Is this value in a vector register, and how big is it?
+ RegStorage reg; // Encoded physical registers.
int16_t s_reg_low; // SSA name for low Dalvik word.
int16_t orig_sreg; // TODO: remove after Bitcode gen complete
// and consolidate usage w/ s_reg_low.
@@ -361,7 +369,7 @@ struct CallInfo {
const RegLocation bad_loc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0, kVectorNotUsed,
- INVALID_REG, INVALID_REG, INVALID_SREG, INVALID_SREG};
+ RegStorage(RegStorage::kInvalid), INVALID_SREG, INVALID_SREG};
class MIRGraph {
public:
@@ -466,6 +474,18 @@ class MIRGraph {
*/
void DumpCFG(const char* dir_prefix, bool all_blocks, const char* suffix = nullptr);
+ void DoCacheFieldLoweringInfo();
+
+ const MirIFieldLoweringInfo& GetIFieldLoweringInfo(MIR* mir) {
+ DCHECK_LT(mir->meta.ifield_lowering_info, ifield_lowering_infos_.Size());
+ return ifield_lowering_infos_.GetRawStorage()[mir->meta.ifield_lowering_info];
+ }
+
+ const MirSFieldLoweringInfo& GetSFieldLoweringInfo(MIR* mir) {
+ DCHECK_LT(mir->meta.sfield_lowering_info, sfield_lowering_infos_.Size());
+ return sfield_lowering_infos_.GetRawStorage()[mir->meta.sfield_lowering_info];
+ }
+
void InitRegLocations();
void RemapRegLocations();
@@ -684,6 +704,11 @@ class MIRGraph {
return opcode >= static_cast<int>(kMirOpFirst);
}
+ // Is this vreg in the in set?
+ bool IsInVReg(int vreg) {
+ return (vreg >= cu_->num_regs);
+ }
+
void DumpCheckStats();
MIR* FindMoveResult(BasicBlock* bb, MIR* mir);
int SRegToVReg(int ssa_reg) const;
@@ -917,6 +942,9 @@ class MIRGraph {
size_t num_non_special_compiler_temps_;
size_t max_available_non_special_compiler_temps_;
size_t max_available_special_compiler_temps_;
+ bool punt_to_interpreter_; // Difficult or not worthwhile - just interpret.
+ GrowableArray<MirIFieldLoweringInfo> ifield_lowering_infos_;
+ GrowableArray<MirSFieldLoweringInfo> sfield_lowering_infos_;
friend class LocalValueNumberingTest;
};
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 67d476929e..243452e968 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -195,6 +195,28 @@ static SelectInstructionKind SelectKind(MIR* mir) {
}
}
+static constexpr ConditionCode kIfCcZConditionCodes[] = {
+ kCondEq, kCondNe, kCondLt, kCondGe, kCondGt, kCondLe
+};
+
+COMPILE_ASSERT(arraysize(kIfCcZConditionCodes) == Instruction::IF_LEZ - Instruction::IF_EQZ + 1,
+ if_ccz_ccodes_size1);
+
+static constexpr bool IsInstructionIfCcZ(Instruction::Code opcode) {
+ return Instruction::IF_EQZ <= opcode && opcode <= Instruction::IF_LEZ;
+}
+
+static constexpr ConditionCode ConditionCodeForIfCcZ(Instruction::Code opcode) {
+ return kIfCcZConditionCodes[opcode - Instruction::IF_EQZ];
+}
+
+COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_EQZ) == kCondEq, check_if_eqz_ccode);
+COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_NEZ) == kCondNe, check_if_nez_ccode);
+COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_LTZ) == kCondLt, check_if_ltz_ccode);
+COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_GEZ) == kCondGe, check_if_gez_ccode);
+COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_GTZ) == kCondGt, check_if_gtz_ccode);
+COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_LEZ) == kCondLe, check_if_lez_ccode);
+
int MIRGraph::GetSSAUseCount(int s_reg) {
return raw_use_counts_.Get(s_reg);
}
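The table-plus-COMPILE_ASSERT idiom above is fully checkable at compile time. A self-contained sketch of the same idiom with standard static_assert (the enums are illustrative, not the real Instruction::Code values):

    enum Opcode { IF_EQZ, IF_NEZ, IF_LTZ, IF_GEZ, IF_GTZ, IF_LEZ };
    enum Cond { kEq, kNe, kLt, kGe, kGt, kLe };

    constexpr Cond kCondTable[] = { kEq, kNe, kLt, kGe, kGt, kLe };

    constexpr bool IsIfCcZ(Opcode op) { return IF_EQZ <= op && op <= IF_LEZ; }
    constexpr Cond CondForIfCcZ(Opcode op) { return kCondTable[op - IF_EQZ]; }

    // Pin the table to the opcode ordering, as the COMPILE_ASSERTs above do.
    static_assert(sizeof(kCondTable) / sizeof(kCondTable[0]) == IF_LEZ - IF_EQZ + 1,
                  "table size must match the IF_xxZ opcode range");
    static_assert(CondForIfCcZ(IF_GEZ) == kGe, "table order must match opcode order");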
@@ -207,10 +229,11 @@ size_t MIRGraph::GetNumAvailableNonSpecialCompilerTemps() {
}
}
+
+// FIXME: will probably need to revisit all uses of this, as the type is not defined.
static const RegLocation temp_loc = {kLocCompilerTemp,
- 0, 1 /*defined*/, 0, 0, 0, 0, 0, 1 /*home*/,
- kVectorNotUsed, INVALID_REG, INVALID_REG, INVALID_SREG,
- INVALID_SREG};
+ 0, 1 /*defined*/, 0, 0, 0, 0, 0, 1 /*home*/, kVectorNotUsed,
+ RegStorage(), INVALID_SREG, INVALID_SREG};
CompilerTemp* MIRGraph::GetNewCompilerTemp(CompilerTempType ct_type, bool wide) {
// There is a limit to the number of non-special temps so check to make sure it wasn't exceeded.
@@ -312,35 +335,11 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
}
if (mir->next != NULL) {
MIR* mir_next = mir->next;
- Instruction::Code br_opcode = mir_next->dalvikInsn.opcode;
- ConditionCode ccode = kCondNv;
- switch (br_opcode) {
- case Instruction::IF_EQZ:
- ccode = kCondEq;
- break;
- case Instruction::IF_NEZ:
- ccode = kCondNe;
- break;
- case Instruction::IF_LTZ:
- ccode = kCondLt;
- break;
- case Instruction::IF_GEZ:
- ccode = kCondGe;
- break;
- case Instruction::IF_GTZ:
- ccode = kCondGt;
- break;
- case Instruction::IF_LEZ:
- ccode = kCondLe;
- break;
- default:
- break;
- }
// Make sure result of cmp is used by next insn and nowhere else
- if ((ccode != kCondNv) &&
+ if (IsInstructionIfCcZ(mir->next->dalvikInsn.opcode) &&
(mir->ssa_rep->defs[0] == mir_next->ssa_rep->uses[0]) &&
(GetSSAUseCount(mir->ssa_rep->defs[0]) == 1)) {
- mir_next->meta.ccode = ccode;
+ mir_next->meta.ccode = ConditionCodeForIfCcZ(mir_next->dalvikInsn.opcode);
switch (opcode) {
case Instruction::CMPL_FLOAT:
mir_next->dalvikInsn.opcode =
@@ -408,8 +407,7 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
// TUNING: expand to support IF_xx compare & branches
if (!cu_->compiler_backend->IsPortable() &&
(cu_->instruction_set == kThumb2 || cu_->instruction_set == kX86) &&
- ((mir->dalvikInsn.opcode == Instruction::IF_EQZ) ||
- (mir->dalvikInsn.opcode == Instruction::IF_NEZ))) {
+ IsInstructionIfCcZ(mir->dalvikInsn.opcode)) {
BasicBlock* ft = GetBasicBlock(bb->fall_through);
DCHECK(ft != NULL);
BasicBlock* ft_ft = GetBasicBlock(ft->fall_through);
@@ -456,12 +454,7 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
* NOTE: not updating other dataflow info (no longer used at this point).
* If this changes, need to update i_dom, etc. here (and in CombineBlocks).
*/
- if (opcode == Instruction::IF_NEZ) {
- // Normalize.
- MIR* tmp_mir = if_true;
- if_true = if_false;
- if_false = tmp_mir;
- }
+ mir->meta.ccode = ConditionCodeForIfCcZ(mir->dalvikInsn.opcode);
mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpSelect);
bool const_form = (SelectKind(if_true) == kSelectConst);
if ((SelectKind(if_true) == kSelectMove)) {
diff --git a/compiler/dex/pass.h b/compiler/dex/pass.h
index 255892e324..9457d5be76 100644
--- a/compiler/dex/pass.h
+++ b/compiler/dex/pass.h
@@ -22,8 +22,8 @@
namespace art {
// Forward declarations.
-class BasicBlock;
-class CompilationUnit;
+struct BasicBlock;
+struct CompilationUnit;
class Pass;
/**
diff --git a/compiler/dex/pass_driver.cc b/compiler/dex/pass_driver.cc
index b60f29697b..256bcb1473 100644
--- a/compiler/dex/pass_driver.cc
+++ b/compiler/dex/pass_driver.cc
@@ -91,6 +91,7 @@ void PassDriver::CreatePasses() {
* - This is not yet an issue: no current pass would require it.
*/
static const Pass* const passes[] = {
+ GetPassInstance<CacheFieldLoweringInfo>(),
GetPassInstance<CodeLayout>(),
GetPassInstance<SSATransformation>(),
GetPassInstance<ConstantPropagation>(),
diff --git a/compiler/dex/portable/mir_to_gbc.cc b/compiler/dex/portable/mir_to_gbc.cc
index 3187fbb28c..70438ecd50 100644
--- a/compiler/dex/portable/mir_to_gbc.cc
+++ b/compiler/dex/portable/mir_to_gbc.cc
@@ -30,6 +30,7 @@
#include "dex/compiler_internals.h"
#include "dex/dataflow_iterator-inl.h"
#include "dex/frontend.h"
+#include "llvm/ir_builder.h"
#include "llvm/llvm_compilation_unit.h"
#include "llvm/utils_llvm.h"
#include "mir_to_gbc.h"
diff --git a/compiler/dex/portable/mir_to_gbc.h b/compiler/dex/portable/mir_to_gbc.h
index 2b681f6097..e97634c519 100644
--- a/compiler/dex/portable/mir_to_gbc.h
+++ b/compiler/dex/portable/mir_to_gbc.h
@@ -17,11 +17,18 @@
#ifndef ART_COMPILER_DEX_PORTABLE_MIR_TO_GBC_H_
#define ART_COMPILER_DEX_PORTABLE_MIR_TO_GBC_H_
+#include <llvm/ADT/ArrayRef.h>
+#include <llvm/IR/BasicBlock.h>
+#include <llvm/IR/IRBuilder.h>
+#include <llvm/IR/LLVMContext.h>
+#include <llvm/IR/Module.h>
+
#include "invoke_type.h"
#include "compiled_method.h"
#include "dex/compiler_enums.h"
#include "dex/compiler_ir.h"
#include "dex/backend.h"
+#include "llvm/intrinsic_helper.h"
#include "llvm/llvm_compilation_unit.h"
#include "safe_map.h"
diff --git a/compiler/dex/quick/arm/arm_lir.h b/compiler/dex/quick/arm/arm_lir.h
index 37b4ec6dc7..9f52f20f35 100644
--- a/compiler/dex/quick/arm/arm_lir.h
+++ b/compiler/dex/quick/arm/arm_lir.h
@@ -117,14 +117,6 @@ namespace art {
// Mask to strip off fp flags.
#define ARM_FP_REG_MASK (ARM_FP_REG_OFFSET-1)
-// RegisterLocation templates return values (r0, or r0/r1).
-#define ARM_LOC_C_RETURN {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, r0, INVALID_REG, \
- INVALID_SREG, INVALID_SREG}
-#define ARM_LOC_C_RETURN_WIDE {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, r0, r1, \
- INVALID_SREG, INVALID_SREG}
-#define ARM_LOC_C_RETURN_FLOAT ARM_LOC_C_RETURN
-#define ARM_LOC_C_RETURN_DOUBLE ARM_LOC_C_RETURN_WIDE
-
enum ArmResourceEncodingPos {
kArmGPReg0 = 0,
kArmRegSP = 13,
@@ -225,6 +217,20 @@ enum ArmNativeRegisterPool {
#define rARM_INVOKE_TGT rARM_LR
#define rARM_COUNT INVALID_REG
+// RegisterLocation templates for return values (r0, or r0/r1).
+const RegLocation arm_loc_c_return
+ {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
+ RegStorage(RegStorage::k32BitSolo, r0), INVALID_SREG, INVALID_SREG};
+const RegLocation arm_loc_c_return_wide
+ {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
+ RegStorage(RegStorage::k64BitPair, r0, r1), INVALID_SREG, INVALID_SREG};
+const RegLocation arm_loc_c_return_float
+ {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
+ RegStorage(RegStorage::k32BitSolo, r0), INVALID_SREG, INVALID_SREG};
+const RegLocation arm_loc_c_return_double
+ {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
+ RegStorage(RegStorage::k64BitPair, r0, r1), INVALID_SREG, INVALID_SREG};
+
enum ArmShiftEncodings {
kArmLsl = 0x0,
kArmLsr = 0x1,
@@ -451,7 +457,6 @@ enum ArmOpcode {
kThumb2MovImm16LST, // Special purpose version for switch table use.
kThumb2MovImm16HST, // Special purpose version for switch table use.
kThumb2LdmiaWB, // ldmia [111010011001] rn[19..16] mask[15..0].
- kThumb2SubsRRI12, // setflags encoding.
kThumb2OrrRRRs, // orrs [111010100101] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
kThumb2Push1, // t3 encoding of push.
kThumb2Pop1, // t3 encoding of pop.
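The arm_loc_c_return constants above are built from RegStorage, which folds the old low_reg/high_reg byte pair into one value plus a shape tag. A rough sketch of the shape implied by the constructors used here (the real bit packing lives in dex/reg_storage.h and may differ):

    #include <cassert>
    #include <cstdint>

    class Storage {
     public:
      enum Kind { kInvalid, k32BitSolo, k64BitPair };
      Storage() : kind_(kInvalid), low_(0), high_(0) {}
      Storage(Kind kind, int low) : kind_(kind), low_(low), high_(0) {
        assert(kind == k32BitSolo);
      }
      Storage(Kind kind, int low, int high) : kind_(kind), low_(low), high_(high) {
        assert(kind == k64BitPair);
      }
      int GetReg() const { return low_; }  // The solo reg, or the low word of a pair.
      int GetHighReg() const { assert(kind_ == k64BitPair); return high_; }
      void SetReg(int low) { low_ = low; }
      void SetHighReg(int high) { assert(kind_ == k64BitPair); high_ = high; }
     private:
      Kind kind_;
      uint8_t low_;
      uint8_t high_;
    };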
diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc
index 00939ec98b..2a9b5a50e2 100644
--- a/compiler/dex/quick/arm/assemble_arm.cc
+++ b/compiler/dex/quick/arm/assemble_arm.cc
@@ -995,11 +995,6 @@ const ArmEncodingMap ArmMir2Lir::EncodingMap[kArmLast] = {
kFmtUnused, -1, -1,
IS_BINARY_OP | REG_DEF0_USE0 | REG_DEF_LIST1 | IS_LOAD,
"ldmia", "!0C!!, <!1R>", 4, kFixupNone),
- ENCODING_MAP(kThumb2SubsRRI12, 0xf1b00000,
- kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtImm12, -1, -1,
- kFmtUnused, -1, -1,
- IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
- "subs", "!0C,!1C,#!2d", 4, kFixupNone),
ENCODING_MAP(kThumb2OrrRRRs, 0xea500000,
kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
kFmtShift, -1, -1, IS_QUAD_OP | REG_DEF0_USE12 | SETS_CCODES,
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index b36dde98b2..f426055068 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -79,13 +79,14 @@ void ArmMir2Lir::GenSparseSwitch(MIR* mir, uint32_t table_offset,
LIR* target = NewLIR0(kPseudoTargetLabel);
// Load next key/disp
NewLIR2(kThumb2LdmiaWB, rBase, (1 << r_key) | (1 << r_disp));
- OpRegReg(kOpCmp, r_key, rl_src.low_reg);
+ OpRegReg(kOpCmp, r_key, rl_src.reg.GetReg());
// Go if match. NOTE: No instruction set switch here - must stay Thumb2
OpIT(kCondEq, "");
LIR* switch_branch = NewLIR1(kThumb2AddPCR, r_disp);
tab_rec->anchor = switch_branch;
// Needs to use setflags encoding here
- NewLIR3(kThumb2SubsRRI12, r_idx, r_idx, 1);
+ OpRegRegImm(kOpSub, r_idx, r_idx, 1); // For value == 1, this should set flags.
+ DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
OpCondBranch(kCondNe, target);
}
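With kThumb2SubsRRI12 removed, the switch code relies on OpRegRegImm choosing a flag-setting encoding for the small decrement, and the DCHECK pins that assumption via the LIR def mask. A tiny sketch of the invariant being asserted (Insn and the resource bit are simplified stand-ins):

    #include <cassert>
    #include <cstdint>

    constexpr uint64_t kEncodeCCode = 1ULL << 63;  // Resource bit for the condition codes.

    struct Insn {
      uint64_t def_mask;  // Resources the instruction defines.
    };

    // Mirrors DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE): whichever
    // encoding was picked for "sub r_idx, r_idx, #1", it must write the flags.
    void AssertSetsFlags(const Insn* last) {
      assert((last->def_mask & kEncodeCCode) != 0);
    }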
@@ -115,10 +116,10 @@ void ArmMir2Lir::GenPackedSwitch(MIR* mir, uint32_t table_offset,
int keyReg;
// Remove the bias, if necessary
if (low_key == 0) {
- keyReg = rl_src.low_reg;
+ keyReg = rl_src.reg.GetReg();
} else {
keyReg = AllocTemp();
- OpRegRegImm(kOpSub, keyReg, rl_src.low_reg, low_key);
+ OpRegRegImm(kOpSub, keyReg, rl_src.reg.GetReg(), low_key);
}
// Bounds check - if < 0 or >= size continue following switch
OpRegImm(kOpCmp, keyReg, size-1);
@@ -293,7 +294,7 @@ void ArmMir2Lir::GenMoveException(RegLocation rl_dest) {
int ex_offset = Thread::ExceptionOffset().Int32Value();
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
int reset_reg = AllocTemp();
- LoadWordDisp(rARM_SELF, ex_offset, rl_result.low_reg);
+ LoadWordDisp(rARM_SELF, ex_offset, rl_result.reg.GetReg());
LoadConstant(reset_reg, 0);
StoreWordDisp(rARM_SELF, ex_offset, reset_reg);
FreeTemp(reset_reg);
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index 65dee807a1..2c0cead6ca 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -49,7 +49,7 @@ class ArmMir2Lir : public Mir2Lir {
bool IsFpReg(int reg);
bool SameRegType(int reg1, int reg2);
int AllocTypedTemp(bool fp_hint, int reg_class);
- int AllocTypedTempPair(bool fp_hint, int reg_class);
+ RegStorage AllocTypedTempWide(bool fp_hint, int reg_class);
int S2d(int low_reg, int high_reg);
int TargetReg(SpecialTargetRegister reg);
int GetArgMappingToPhysicalReg(int arg_num);
diff --git a/compiler/dex/quick/arm/fp_arm.cc b/compiler/dex/quick/arm/fp_arm.cc
index 46542e118c..dd0a429a85 100644
--- a/compiler/dex/quick/arm/fp_arm.cc
+++ b/compiler/dex/quick/arm/fp_arm.cc
@@ -63,7 +63,7 @@ void ArmMir2Lir::GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
rl_src1 = LoadValue(rl_src1, kFPReg);
rl_src2 = LoadValue(rl_src2, kFPReg);
rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR3(op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
+ NewLIR3(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
StoreValue(rl_dest, rl_result);
}
@@ -111,8 +111,8 @@ void ArmMir2Lir::GenArithOpDouble(Instruction::Code opcode,
rl_result = EvalLoc(rl_dest, kFPReg, true);
DCHECK(rl_dest.wide);
DCHECK(rl_result.wide);
- NewLIR3(op, S2d(rl_result.low_reg, rl_result.high_reg), S2d(rl_src1.low_reg, rl_src1.high_reg),
- S2d(rl_src2.low_reg, rl_src2.high_reg));
+ NewLIR3(op, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), S2d(rl_src1.reg.GetReg(), rl_src1.reg.GetHighReg()),
+ S2d(rl_src2.reg.GetReg(), rl_src2.reg.GetHighReg()));
StoreValueWide(rl_dest, rl_result);
}
@@ -143,16 +143,16 @@ void ArmMir2Lir::GenConversion(Instruction::Code opcode,
break;
case Instruction::LONG_TO_DOUBLE: {
rl_src = LoadValueWide(rl_src, kFPReg);
- src_reg = S2d(rl_src.low_reg, rl_src.high_reg);
+ src_reg = S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
rl_result = EvalLoc(rl_dest, kFPReg, true);
// TODO: clean up AllocTempDouble so that its result has the double bits set.
int tmp1 = AllocTempDouble();
int tmp2 = AllocTempDouble();
NewLIR2(kThumb2VcvtF64S32, tmp1 | ARM_FP_DOUBLE, (src_reg & ~ARM_FP_DOUBLE) + 1);
- NewLIR2(kThumb2VcvtF64U32, S2d(rl_result.low_reg, rl_result.high_reg), (src_reg & ~ARM_FP_DOUBLE));
+ NewLIR2(kThumb2VcvtF64U32, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), (src_reg & ~ARM_FP_DOUBLE));
LoadConstantWide(tmp2, tmp2 + 1, 0x41f0000000000000LL);
- NewLIR3(kThumb2VmlaF64, S2d(rl_result.low_reg, rl_result.high_reg), tmp1 | ARM_FP_DOUBLE,
+ NewLIR3(kThumb2VmlaF64, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), tmp1 | ARM_FP_DOUBLE,
tmp2 | ARM_FP_DOUBLE);
FreeTemp(tmp1);
FreeTemp(tmp2);
@@ -173,18 +173,18 @@ void ArmMir2Lir::GenConversion(Instruction::Code opcode,
}
if (rl_src.wide) {
rl_src = LoadValueWide(rl_src, kFPReg);
- src_reg = S2d(rl_src.low_reg, rl_src.high_reg);
+ src_reg = S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
} else {
rl_src = LoadValue(rl_src, kFPReg);
- src_reg = rl_src.low_reg;
+ src_reg = rl_src.reg.GetReg();
}
if (rl_dest.wide) {
rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR2(op, S2d(rl_result.low_reg, rl_result.high_reg), src_reg);
+ NewLIR2(op, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), src_reg);
StoreValueWide(rl_dest, rl_result);
} else {
rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR2(op, rl_result.low_reg, src_reg);
+ NewLIR2(op, rl_result.reg.GetReg(), src_reg);
StoreValue(rl_dest, rl_result);
}
}
@@ -199,14 +199,14 @@ void ArmMir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias,
rl_src2 = mir_graph_->GetSrcWide(mir, 2);
rl_src1 = LoadValueWide(rl_src1, kFPReg);
rl_src2 = LoadValueWide(rl_src2, kFPReg);
- NewLIR2(kThumb2Vcmpd, S2d(rl_src1.low_reg, rl_src2.high_reg),
- S2d(rl_src2.low_reg, rl_src2.high_reg));
+ NewLIR2(kThumb2Vcmpd, S2d(rl_src1.reg.GetReg(), rl_src1.reg.GetHighReg()),
+ S2d(rl_src2.reg.GetReg(), rl_src2.reg.GetHighReg()));
} else {
rl_src1 = mir_graph_->GetSrc(mir, 0);
rl_src2 = mir_graph_->GetSrc(mir, 1);
rl_src1 = LoadValue(rl_src1, kFPReg);
rl_src2 = LoadValue(rl_src2, kFPReg);
- NewLIR2(kThumb2Vcmps, rl_src1.low_reg, rl_src2.low_reg);
+ NewLIR2(kThumb2Vcmps, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
}
NewLIR0(kThumb2Fmstat);
ConditionCode ccode = mir->meta.ccode;
@@ -273,28 +273,28 @@ void ArmMir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest,
// In case result vreg is also a src vreg, break association to avoid useless copy by EvalLoc()
ClobberSReg(rl_dest.s_reg_low);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- LoadConstant(rl_result.low_reg, default_result);
- NewLIR2(kThumb2Vcmpd, S2d(rl_src1.low_reg, rl_src2.high_reg),
- S2d(rl_src2.low_reg, rl_src2.high_reg));
+ LoadConstant(rl_result.reg.GetReg(), default_result);
+ NewLIR2(kThumb2Vcmpd, S2d(rl_src1.reg.GetReg(), rl_src1.reg.GetHighReg()),
+ S2d(rl_src2.reg.GetReg(), rl_src2.reg.GetHighReg()));
} else {
rl_src1 = LoadValue(rl_src1, kFPReg);
rl_src2 = LoadValue(rl_src2, kFPReg);
// In case result vreg is also a srcvreg, break association to avoid useless copy by EvalLoc()
ClobberSReg(rl_dest.s_reg_low);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- LoadConstant(rl_result.low_reg, default_result);
- NewLIR2(kThumb2Vcmps, rl_src1.low_reg, rl_src2.low_reg);
+ LoadConstant(rl_result.reg.GetReg(), default_result);
+ NewLIR2(kThumb2Vcmps, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
}
- DCHECK(!ARM_FPREG(rl_result.low_reg));
+ DCHECK(!ARM_FPREG(rl_result.reg.GetReg()));
NewLIR0(kThumb2Fmstat);
OpIT((default_result == -1) ? kCondGt : kCondMi, "");
- NewLIR2(kThumb2MovI8M, rl_result.low_reg,
+ NewLIR2(kThumb2MovI8M, rl_result.reg.GetReg(),
ModifiedImmediate(-default_result)); // Must not alter ccodes
GenBarrier();
OpIT(kCondEq, "");
- LoadConstant(rl_result.low_reg, 0);
+ LoadConstant(rl_result.reg.GetReg(), 0);
GenBarrier();
StoreValue(rl_dest, rl_result);
@@ -304,7 +304,7 @@ void ArmMir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src) {
RegLocation rl_result;
rl_src = LoadValue(rl_src, kFPReg);
rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR2(kThumb2Vnegs, rl_result.low_reg, rl_src.low_reg);
+ NewLIR2(kThumb2Vnegs, rl_result.reg.GetReg(), rl_src.reg.GetReg());
StoreValue(rl_dest, rl_result);
}
@@ -312,8 +312,8 @@ void ArmMir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) {
RegLocation rl_result;
rl_src = LoadValueWide(rl_src, kFPReg);
rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR2(kThumb2Vnegd, S2d(rl_result.low_reg, rl_result.high_reg),
- S2d(rl_src.low_reg, rl_src.high_reg));
+ NewLIR2(kThumb2Vnegd, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()),
+ S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg()));
StoreValueWide(rl_dest, rl_result);
}
@@ -324,18 +324,18 @@ bool ArmMir2Lir::GenInlinedSqrt(CallInfo* info) {
RegLocation rl_dest = InlineTargetWide(info); // double place for result
rl_src = LoadValueWide(rl_src, kFPReg);
RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR2(kThumb2Vsqrtd, S2d(rl_result.low_reg, rl_result.high_reg),
- S2d(rl_src.low_reg, rl_src.high_reg));
- NewLIR2(kThumb2Vcmpd, S2d(rl_result.low_reg, rl_result.high_reg),
- S2d(rl_result.low_reg, rl_result.high_reg));
+ NewLIR2(kThumb2Vsqrtd, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()),
+ S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg()));
+ NewLIR2(kThumb2Vcmpd, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()),
+ S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()));
NewLIR0(kThumb2Fmstat);
branch = NewLIR2(kThumbBCond, 0, kArmCondEq);
ClobberCallerSave();
LockCallTemps(); // Using fixed registers
int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pSqrt));
- NewLIR3(kThumb2Fmrrd, r0, r1, S2d(rl_src.low_reg, rl_src.high_reg));
+ NewLIR3(kThumb2Fmrrd, r0, r1, S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg()));
NewLIR1(kThumbBlxR, r_tgt);
- NewLIR3(kThumb2Fmdrr, S2d(rl_result.low_reg, rl_result.high_reg), r0, r1);
+ NewLIR3(kThumb2Fmdrr, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), r0, r1);
branch->target = NewLIR0(kPseudoTargetLabel);
StoreValueWide(rl_dest, rl_result);
return true;
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index 43928fc5e2..fb2096f1e9 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -90,10 +90,10 @@ void ArmMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
int t_reg = AllocTemp();
LoadConstant(t_reg, -1);
- OpRegReg(kOpCmp, rl_src1.high_reg, rl_src2.high_reg);
+ OpRegReg(kOpCmp, rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
LIR* branch1 = OpCondBranch(kCondLt, NULL);
LIR* branch2 = OpCondBranch(kCondGt, NULL);
- OpRegRegReg(kOpSub, t_reg, rl_src1.low_reg, rl_src2.low_reg);
+ OpRegRegReg(kOpSub, t_reg, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
LIR* branch3 = OpCondBranch(kCondEq, NULL);
OpIT(kCondHi, "E");
@@ -107,7 +107,7 @@ void ArmMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
target1 = NewLIR0(kPseudoTargetLabel);
RegLocation rl_temp = LocCReturn(); // Just using as template, will change
- rl_temp.low_reg = t_reg;
+ rl_temp.reg.SetReg(t_reg);
StoreValue(rl_dest, rl_temp);
FreeTemp(t_reg);
@@ -125,8 +125,8 @@ void ArmMir2Lir::GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1,
LIR* taken = &block_label_list_[bb->taken];
LIR* not_taken = &block_label_list_[bb->fall_through];
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
- int32_t low_reg = rl_src1.low_reg;
- int32_t high_reg = rl_src1.high_reg;
+ int32_t low_reg = rl_src1.reg.GetReg();
+ int32_t high_reg = rl_src1.reg.GetHighReg();
if (val == 0 && (ccode == kCondEq || ccode == kCondNe)) {
int t_reg = AllocTemp();
@@ -172,21 +172,35 @@ void ArmMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
RegLocation rl_dest = mir_graph_->GetDest(mir);
rl_src = LoadValue(rl_src, kCoreReg);
+ ConditionCode ccode = mir->meta.ccode;
if (mir->ssa_rep->num_uses == 1) {
// CONST case
int true_val = mir->dalvikInsn.vB;
int false_val = mir->dalvikInsn.vC;
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- if ((true_val == 1) && (false_val == 0)) {
- OpRegRegImm(kOpRsub, rl_result.low_reg, rl_src.low_reg, 1);
- OpIT(kCondUlt, "");
- LoadConstant(rl_result.low_reg, 0);
+ // Change kCondNe to kCondEq for the special cases below.
+ if (ccode == kCondNe) {
+ ccode = kCondEq;
+ std::swap(true_val, false_val);
+ }
+ bool cheap_false_val = InexpensiveConstantInt(false_val);
+ if (cheap_false_val && ccode == kCondEq && (true_val == 0 || true_val == -1)) {
+ OpRegRegImm(kOpSub, rl_result.reg.GetReg(), rl_src.reg.GetReg(), -true_val);
+ DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
+ OpIT(true_val == 0 ? kCondNe : kCondUge, "");
+ LoadConstant(rl_result.reg.GetReg(), false_val);
GenBarrier(); // Add a scheduling barrier to keep the IT shadow intact
- } else if (InexpensiveConstantInt(true_val) && InexpensiveConstantInt(false_val)) {
- OpRegImm(kOpCmp, rl_src.low_reg, 0);
- OpIT(kCondEq, "E");
- LoadConstant(rl_result.low_reg, true_val);
- LoadConstant(rl_result.low_reg, false_val);
+ } else if (cheap_false_val && ccode == kCondEq && true_val == 1) {
+ OpRegRegImm(kOpRsub, rl_result.reg.GetReg(), rl_src.reg.GetReg(), 1);
+ DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
+ OpIT(kCondLs, "");
+ LoadConstant(rl_result.reg.GetReg(), false_val);
+ GenBarrier(); // Add a scheduling barrier to keep the IT shadow intact
+ } else if (cheap_false_val && InexpensiveConstantInt(true_val)) {
+ OpRegImm(kOpCmp, rl_src.reg.GetReg(), 0);
+ OpIT(ccode, "E");
+ LoadConstant(rl_result.reg.GetReg(), true_val);
+ LoadConstant(rl_result.reg.GetReg(), false_val);
GenBarrier(); // Add a scheduling barrier to keep the IT shadow intact
} else {
// Unlikely case - could be tuned.
@@ -194,10 +208,10 @@ void ArmMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
int t_reg2 = AllocTemp();
LoadConstant(t_reg1, true_val);
LoadConstant(t_reg2, false_val);
- OpRegImm(kOpCmp, rl_src.low_reg, 0);
- OpIT(kCondEq, "E");
- OpRegCopy(rl_result.low_reg, t_reg1);
- OpRegCopy(rl_result.low_reg, t_reg2);
+ OpRegImm(kOpCmp, rl_src.reg.GetReg(), 0);
+ OpIT(ccode, "E");
+ OpRegCopy(rl_result.reg.GetReg(), t_reg1);
+ OpRegCopy(rl_result.reg.GetReg(), t_reg2);
GenBarrier(); // Add a scheduling barrier to keep the IT shadow intact
}
} else {
@@ -207,17 +221,17 @@ void ArmMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
rl_true = LoadValue(rl_true, kCoreReg);
rl_false = LoadValue(rl_false, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegImm(kOpCmp, rl_src.low_reg, 0);
- if (rl_result.low_reg == rl_true.low_reg) { // Is the "true" case already in place?
- OpIT(kCondNe, "");
- OpRegCopy(rl_result.low_reg, rl_false.low_reg);
- } else if (rl_result.low_reg == rl_false.low_reg) { // False case in place?
- OpIT(kCondEq, "");
- OpRegCopy(rl_result.low_reg, rl_true.low_reg);
+ OpRegImm(kOpCmp, rl_src.reg.GetReg(), 0);
+ if (rl_result.reg.GetReg() == rl_true.reg.GetReg()) { // Is the "true" case already in place?
+ OpIT(NegateComparison(ccode), "");
+ OpRegCopy(rl_result.reg.GetReg(), rl_false.reg.GetReg());
+ } else if (rl_result.reg.GetReg() == rl_false.reg.GetReg()) { // False case in place?
+ OpIT(ccode, "");
+ OpRegCopy(rl_result.reg.GetReg(), rl_true.reg.GetReg());
} else { // Normal - select between the two.
- OpIT(kCondEq, "E");
- OpRegCopy(rl_result.low_reg, rl_true.low_reg);
- OpRegCopy(rl_result.low_reg, rl_false.low_reg);
+ OpIT(ccode, "E");
+ OpRegCopy(rl_result.reg.GetReg(), rl_true.reg.GetReg());
+ OpRegCopy(rl_result.reg.GetReg(), rl_false.reg.GetReg());
}
GenBarrier(); // Add a scheduling barrier to keep the IT shadow intact
}
@@ -247,7 +261,7 @@ void ArmMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
LIR* not_taken = &block_label_list_[bb->fall_through];
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
- OpRegReg(kOpCmp, rl_src1.high_reg, rl_src2.high_reg);
+ OpRegReg(kOpCmp, rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
switch (ccode) {
case kCondEq:
OpCondBranch(kCondNe, not_taken);
@@ -278,7 +292,7 @@ void ArmMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
default:
LOG(FATAL) << "Unexpected ccode: " << ccode;
}
- OpRegReg(kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
+ OpRegReg(kOpCmp, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
OpCondBranch(ccode, taken);
}
@@ -415,21 +429,21 @@ bool ArmMir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
int r_hi = AllocTemp();
int r_lo = AllocTemp();
- NewLIR4(kThumb2Smull, r_lo, r_hi, r_magic, rl_src.low_reg);
+ NewLIR4(kThumb2Smull, r_lo, r_hi, r_magic, rl_src.reg.GetReg());
switch (pattern) {
case Divide3:
- OpRegRegRegShift(kOpSub, rl_result.low_reg, r_hi,
- rl_src.low_reg, EncodeShift(kArmAsr, 31));
+ OpRegRegRegShift(kOpSub, rl_result.reg.GetReg(), r_hi,
+ rl_src.reg.GetReg(), EncodeShift(kArmAsr, 31));
break;
case Divide5:
- OpRegRegImm(kOpAsr, r_lo, rl_src.low_reg, 31);
- OpRegRegRegShift(kOpRsub, rl_result.low_reg, r_lo, r_hi,
+ OpRegRegImm(kOpAsr, r_lo, rl_src.reg.GetReg(), 31);
+ OpRegRegRegShift(kOpRsub, rl_result.reg.GetReg(), r_lo, r_hi,
EncodeShift(kArmAsr, magic_table[lit].shift));
break;
case Divide7:
- OpRegReg(kOpAdd, r_hi, rl_src.low_reg);
- OpRegRegImm(kOpAsr, r_lo, rl_src.low_reg, 31);
- OpRegRegRegShift(kOpRsub, rl_result.low_reg, r_lo, r_hi,
+ OpRegReg(kOpAdd, r_hi, rl_src.reg.GetReg());
+ OpRegRegImm(kOpAsr, r_lo, rl_src.reg.GetReg(), 31);
+ OpRegRegRegShift(kOpRsub, rl_result.reg.GetReg(), r_lo, r_hi,
EncodeShift(kArmAsr, magic_table[lit].shift));
break;
default:
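For reference, the Divide3 pattern above is the usual multiply-by-reciprocal trick: smull leaves the high 32 bits of magic * src in r_hi, and subtracting src >> 31 corrects the estimate for negative dividends. A worked C++ equivalent, assuming the conventional 0x55555556 magic constant for division by 3:

    #include <cassert>
    #include <cstdint>

    int32_t Div3(int32_t src) {
      const int64_t magic = 0x55555556LL;                // r_magic for lit == 3.
      int64_t product = magic * src;                     // kThumb2Smull: 64-bit product.
      int32_t hi = static_cast<int32_t>(product >> 32);  // r_hi
      return hi - (src >> 31);                           // kOpSub with src ASR #31.
    }

    int main() {
      assert(Div3(9) == 3 && Div3(10) == 3 && Div3(-10) == -3);
      return 0;
    }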
@@ -476,7 +490,7 @@ RegLocation ArmMir2Lir::GenDivRem(RegLocation rl_dest, int reg1, int reg2,
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (is_div) {
// Simple case, use sdiv instruction.
- OpRegRegReg(kOpDiv, rl_result.low_reg, reg1, reg2);
+ OpRegRegReg(kOpDiv, rl_result.reg.GetReg(), reg1, reg2);
} else {
// Remainder case, use the following code:
// temp = reg1 / reg2 - integer division
@@ -486,7 +500,7 @@ RegLocation ArmMir2Lir::GenDivRem(RegLocation rl_dest, int reg1, int reg2,
int temp = AllocTemp();
OpRegRegReg(kOpDiv, temp, reg1, reg2);
OpRegReg(kOpMul, temp, reg2);
- OpRegRegReg(kOpSub, rl_result.low_reg, reg1, temp);
+ OpRegRegReg(kOpSub, rl_result.reg.GetReg(), reg1, temp);
FreeTemp(temp);
}
@@ -501,10 +515,10 @@ bool ArmMir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min) {
rl_src2 = LoadValue(rl_src2, kCoreReg);
RegLocation rl_dest = InlineTarget(info);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegReg(kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
+ OpRegReg(kOpCmp, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
OpIT((is_min) ? kCondGt : kCondLt, "E");
- OpRegReg(kOpMov, rl_result.low_reg, rl_src2.low_reg);
- OpRegReg(kOpMov, rl_result.low_reg, rl_src1.low_reg);
+ OpRegReg(kOpMov, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
+ OpRegReg(kOpMov, rl_result.reg.GetReg(), rl_src1.reg.GetReg());
GenBarrier();
StoreValue(rl_dest, rl_result);
return true;
@@ -518,18 +532,18 @@ bool ArmMir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (size == kLong) {
// Fake unaligned LDRD by two unaligned LDR instructions on ARMv7 with SCTLR.A set to 0.
- if (rl_address.low_reg != rl_result.low_reg) {
- LoadBaseDisp(rl_address.low_reg, 0, rl_result.low_reg, kWord, INVALID_SREG);
- LoadBaseDisp(rl_address.low_reg, 4, rl_result.high_reg, kWord, INVALID_SREG);
+ if (rl_address.reg.GetReg() != rl_result.reg.GetReg()) {
+ LoadBaseDisp(rl_address.reg.GetReg(), 0, rl_result.reg.GetReg(), kWord, INVALID_SREG);
+ LoadBaseDisp(rl_address.reg.GetReg(), 4, rl_result.reg.GetHighReg(), kWord, INVALID_SREG);
} else {
- LoadBaseDisp(rl_address.low_reg, 4, rl_result.high_reg, kWord, INVALID_SREG);
- LoadBaseDisp(rl_address.low_reg, 0, rl_result.low_reg, kWord, INVALID_SREG);
+ LoadBaseDisp(rl_address.reg.GetReg(), 4, rl_result.reg.GetHighReg(), kWord, INVALID_SREG);
+ LoadBaseDisp(rl_address.reg.GetReg(), 0, rl_result.reg.GetReg(), kWord, INVALID_SREG);
}
StoreValueWide(rl_dest, rl_result);
} else {
DCHECK(size == kSignedByte || size == kSignedHalf || size == kWord);
// Unaligned load with LDR and LDRSH is allowed on ARMv7 with SCTLR.A set to 0.
- LoadBaseDisp(rl_address.low_reg, 0, rl_result.low_reg, size, INVALID_SREG);
+ LoadBaseDisp(rl_address.reg.GetReg(), 0, rl_result.reg.GetReg(), size, INVALID_SREG);
StoreValue(rl_dest, rl_result);
}
return true;
@@ -543,13 +557,13 @@ bool ArmMir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
if (size == kLong) {
// Fake unaligned STRD by two unaligned STR instructions on ARMv7 with SCTLR.A set to 0.
RegLocation rl_value = LoadValueWide(rl_src_value, kCoreReg);
- StoreBaseDisp(rl_address.low_reg, 0, rl_value.low_reg, kWord);
- StoreBaseDisp(rl_address.low_reg, 4, rl_value.high_reg, kWord);
+ StoreBaseDisp(rl_address.reg.GetReg(), 0, rl_value.reg.GetReg(), kWord);
+ StoreBaseDisp(rl_address.reg.GetReg(), 4, rl_value.reg.GetHighReg(), kWord);
} else {
DCHECK(size == kSignedByte || size == kSignedHalf || size == kWord);
// Unaligned store with STR and STRH is allowed on ARMv7 with SCTLR.A set to 0.
RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
- StoreBaseDisp(rl_address.low_reg, 0, rl_value.low_reg, size);
+ StoreBaseDisp(rl_address.reg.GetReg(), 0, rl_value.reg.GetReg(), size);
}
return true;
}
@@ -589,24 +603,24 @@ bool ArmMir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
bool load_early = true;
if (is_long) {
bool expected_is_core_reg =
- rl_src_expected.location == kLocPhysReg && !IsFpReg(rl_src_expected.low_reg);
+ rl_src_expected.location == kLocPhysReg && !IsFpReg(rl_src_expected.reg.GetReg());
bool new_value_is_core_reg =
- rl_src_new_value.location == kLocPhysReg && !IsFpReg(rl_src_new_value.low_reg);
- bool expected_is_good_reg = expected_is_core_reg && !IsTemp(rl_src_expected.low_reg);
- bool new_value_is_good_reg = new_value_is_core_reg && !IsTemp(rl_src_new_value.low_reg);
+ rl_src_new_value.location == kLocPhysReg && !IsFpReg(rl_src_new_value.reg.GetReg());
+ bool expected_is_good_reg = expected_is_core_reg && !IsTemp(rl_src_expected.reg.GetReg());
+ bool new_value_is_good_reg = new_value_is_core_reg && !IsTemp(rl_src_new_value.reg.GetReg());
if (!expected_is_good_reg && !new_value_is_good_reg) {
// None of expected/new_value is non-temp reg, need to load both late
load_early = false;
// Make sure they are not in the temp regs and the load will not be skipped.
if (expected_is_core_reg) {
- FlushRegWide(rl_src_expected.low_reg, rl_src_expected.high_reg);
+ FlushRegWide(rl_src_expected.reg.GetReg(), rl_src_expected.reg.GetHighReg());
ClobberSReg(rl_src_expected.s_reg_low);
ClobberSReg(GetSRegHi(rl_src_expected.s_reg_low));
rl_src_expected.location = kLocDalvikFrame;
}
if (new_value_is_core_reg) {
- FlushRegWide(rl_src_new_value.low_reg, rl_src_new_value.high_reg);
+ FlushRegWide(rl_src_new_value.reg.GetReg(), rl_src_new_value.reg.GetHighReg());
ClobberSReg(rl_src_new_value.s_reg_low);
ClobberSReg(GetSRegHi(rl_src_new_value.s_reg_low));
rl_src_new_value.location = kLocDalvikFrame;
@@ -627,19 +641,19 @@ bool ArmMir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
if (is_object && !mir_graph_->IsConstantNullRef(rl_new_value)) {
// Mark card for object assuming new value is stored.
- MarkGCCard(rl_new_value.low_reg, rl_object.low_reg);
+ MarkGCCard(rl_new_value.reg.GetReg(), rl_object.reg.GetReg());
}
RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
int r_ptr = rARM_LR;
- OpRegRegReg(kOpAdd, r_ptr, rl_object.low_reg, rl_offset.low_reg);
+ OpRegRegReg(kOpAdd, r_ptr, rl_object.reg.GetReg(), rl_offset.reg.GetReg());
// Free now unneeded rl_object and rl_offset to give more temps.
ClobberSReg(rl_object.s_reg_low);
- FreeTemp(rl_object.low_reg);
+ FreeTemp(rl_object.reg.GetReg());
ClobberSReg(rl_offset.s_reg_low);
- FreeTemp(rl_offset.low_reg);
+ FreeTemp(rl_offset.reg.GetReg());
RegLocation rl_expected;
if (!is_long) {
@@ -647,8 +661,11 @@ bool ArmMir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
} else if (load_early) {
rl_expected = LoadValueWide(rl_src_expected, kCoreReg);
} else {
- rl_new_value.low_reg = rl_expected.low_reg = AllocTemp();
- rl_new_value.high_reg = rl_expected.high_reg = AllocTemp();
+ // NOTE: partially defined rl_expected & rl_new_value - but we just want the regs.
+ int low_reg = AllocTemp();
+ int high_reg = AllocTemp();
+ rl_new_value.reg = RegStorage(RegStorage::k64BitPair, low_reg, high_reg);
+ rl_expected = rl_new_value;
}
// do {
@@ -662,13 +679,13 @@ bool ArmMir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
if (is_long) {
int r_tmp_high = AllocTemp();
if (!load_early) {
- LoadValueDirectWide(rl_src_expected, rl_expected.low_reg, rl_expected.high_reg);
+ LoadValueDirectWide(rl_src_expected, rl_expected.reg.GetReg(), rl_expected.reg.GetHighReg());
}
NewLIR3(kThumb2Ldrexd, r_tmp, r_tmp_high, r_ptr);
- OpRegReg(kOpSub, r_tmp, rl_expected.low_reg);
- OpRegReg(kOpSub, r_tmp_high, rl_expected.high_reg);
+ OpRegReg(kOpSub, r_tmp, rl_expected.reg.GetReg());
+ OpRegReg(kOpSub, r_tmp_high, rl_expected.reg.GetHighReg());
if (!load_early) {
- LoadValueDirectWide(rl_src_new_value, rl_new_value.low_reg, rl_new_value.high_reg);
+ LoadValueDirectWide(rl_src_new_value, rl_new_value.reg.GetReg(), rl_new_value.reg.GetHighReg());
}
// Make sure we use ORR that sets the ccode
if (ARM_LOWREG(r_tmp) && ARM_LOWREG(r_tmp_high)) {
@@ -680,14 +697,14 @@ bool ArmMir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
OpIT(kCondEq, "T");
- NewLIR4(kThumb2Strexd /* eq */, r_tmp, rl_new_value.low_reg, rl_new_value.high_reg, r_ptr);
+ NewLIR4(kThumb2Strexd /* eq */, r_tmp, rl_new_value.reg.GetReg(), rl_new_value.reg.GetHighReg(), r_ptr);
} else {
NewLIR3(kThumb2Ldrex, r_tmp, r_ptr, 0);
- OpRegReg(kOpSub, r_tmp, rl_expected.low_reg);
+ OpRegReg(kOpSub, r_tmp, rl_expected.reg.GetReg());
DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
OpIT(kCondEq, "T");
- NewLIR4(kThumb2Strex /* eq */, r_tmp, rl_new_value.low_reg, r_ptr, 0);
+ NewLIR4(kThumb2Strex /* eq */, r_tmp, rl_new_value.reg.GetReg(), r_ptr, 0);
}
// Still one conditional left from OpIT(kCondEq, "T") from either branch
@@ -695,16 +712,16 @@ bool ArmMir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
OpCondBranch(kCondEq, target);
if (!load_early) {
- FreeTemp(rl_expected.low_reg); // Now unneeded.
- FreeTemp(rl_expected.high_reg); // Now unneeded.
+ FreeTemp(rl_expected.reg.GetReg()); // Now unneeded.
+ FreeTemp(rl_expected.reg.GetHighReg()); // Now unneeded.
}
// result := (tmp1 != 0) ? 0 : 1;
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegImm(kOpRsub, rl_result.low_reg, r_tmp, 1);
+ OpRegRegImm(kOpRsub, rl_result.reg.GetReg(), r_tmp, 1);
DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
OpIT(kCondUlt, "");
- LoadConstant(rl_result.low_reg, 0); /* cc */
+ LoadConstant(rl_result.reg.GetReg(), 0); /* cc */
FreeTemp(r_tmp); // Now unneeded.
StoreValue(rl_dest, rl_result);
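The ldrex/strex sequence above is compare-and-swap built from load-linked/store-conditional. A C++ pseudocode sketch of the control flow, with single-threaded stand-ins for the exclusive-monitor instructions so it compiles; the real code predicates the store with OpIT instead of branching inside the loop:

    #include <cassert>
    #include <cstdint>

    static bool g_monitor;  // Toy exclusive monitor; ldrex/strex use hardware state.

    uint32_t LoadExclusive(volatile uint32_t* ptr) { g_monitor = true; return *ptr; }
    bool StoreExclusive(volatile uint32_t* ptr, uint32_t v) {
      if (!g_monitor) return false;  // Reservation lost: the store fails.
      *ptr = v;
      g_monitor = false;
      return true;
    }

    bool Cas32(volatile uint32_t* ptr, uint32_t expected, uint32_t new_value) {
      for (;;) {                                          // target label
        uint32_t tmp = LoadExclusive(ptr);                // kThumb2Ldrex
        if (tmp != expected) return false;                // sub sets flags; mismatch exits.
        if (StoreExclusive(ptr, new_value)) return true;  // kThumb2Strex succeeded.
        // strex failed; retry, matching OpCondBranch(kCondEq, target).
      }
    }

    int main() {
      volatile uint32_t value = 7;
      assert(Cas32(&value, 7, 9) && value == 9);
      assert(!Cas32(&value, 7, 1) && value == 9);
      return 0;
    }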
@@ -730,10 +747,10 @@ LIR* ArmMir2Lir::OpVstm(int rBase, int count) {
void ArmMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
RegLocation rl_result, int lit,
int first_bit, int second_bit) {
- OpRegRegRegShift(kOpAdd, rl_result.low_reg, rl_src.low_reg, rl_src.low_reg,
+ OpRegRegRegShift(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), rl_src.reg.GetReg(),
EncodeShift(kArmLsl, second_bit - first_bit));
if (first_bit != 0) {
- OpRegRegImm(kOpLsl, rl_result.low_reg, rl_result.low_reg, first_bit);
+ OpRegRegImm(kOpLsl, rl_result.reg.GetReg(), rl_result.reg.GetReg(), first_bit);
}
}
@@ -753,7 +770,8 @@ LIR* ArmMir2Lir::OpTestSuspend(LIR* target) {
// Decrement register and branch on condition
LIR* ArmMir2Lir::OpDecAndBranch(ConditionCode c_code, int reg, LIR* target) {
// Combine sub & test using sub setflags encoding here
- NewLIR3(kThumb2SubsRRI12, reg, reg, 1);
+ OpRegRegImm(kOpSub, reg, reg, 1); // For value == 1, this should set flags.
+ DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
return OpCondBranch(c_code, target);
}
@@ -782,14 +800,14 @@ void ArmMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
int z_reg = AllocTemp();
LoadConstantNoClobber(z_reg, 0);
// Check for destructive overlap
- if (rl_result.low_reg == rl_src.high_reg) {
+ if (rl_result.reg.GetReg() == rl_src.reg.GetHighReg()) {
int t_reg = AllocTemp();
- OpRegRegReg(kOpSub, rl_result.low_reg, z_reg, rl_src.low_reg);
- OpRegRegReg(kOpSbc, rl_result.high_reg, z_reg, t_reg);
+ OpRegRegReg(kOpSub, rl_result.reg.GetReg(), z_reg, rl_src.reg.GetReg());
+ OpRegRegReg(kOpSbc, rl_result.reg.GetHighReg(), z_reg, t_reg);
FreeTemp(t_reg);
} else {
- OpRegRegReg(kOpSub, rl_result.low_reg, z_reg, rl_src.low_reg);
- OpRegRegReg(kOpSbc, rl_result.high_reg, z_reg, rl_src.high_reg);
+ OpRegRegReg(kOpSub, rl_result.reg.GetReg(), z_reg, rl_src.reg.GetReg());
+ OpRegRegReg(kOpSbc, rl_result.reg.GetHighReg(), z_reg, rl_src.reg.GetHighReg());
}
FreeTemp(z_reg);
StoreValueWide(rl_dest, rl_result);
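The sub/sbc pair above is the standard two-word negate: negate the low word first, which sets the borrow, then subtract both the high word and that borrow from zero. Spelled out in C++:

    #include <cassert>
    #include <cstdint>

    uint64_t NegLong(uint32_t src_lo, uint32_t src_hi) {
      uint32_t lo = 0u - src_lo;                   // kOpSub from the zeroed temp.
      uint32_t borrow = (src_lo != 0u) ? 1u : 0u;  // Borrow out of the low subtract.
      uint32_t hi = 0u - src_hi - borrow;          // kOpSbc consumes the borrow.
      return (static_cast<uint64_t>(hi) << 32) | lo;
    }

    int main() {
      assert(NegLong(1u, 0u) == static_cast<uint64_t>(-1LL));
      assert(NegLong(0u, 1u) == static_cast<uint64_t>(-(1LL << 32)));
      return 0;
    }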
@@ -827,41 +845,41 @@ void ArmMir2Lir::GenMulLong(Instruction::Code opcode, RegLocation rl_dest,
bool special_case = true;
// If operands are the same, or any pair has been promoted we're not the special case.
if ((rl_src1.s_reg_low == rl_src2.s_reg_low) ||
- (!IsTemp(rl_src1.low_reg) && !IsTemp(rl_src1.high_reg)) ||
- (!IsTemp(rl_src2.low_reg) && !IsTemp(rl_src2.high_reg))) {
+ (!IsTemp(rl_src1.reg.GetReg()) && !IsTemp(rl_src1.reg.GetHighReg())) ||
+ (!IsTemp(rl_src2.reg.GetReg()) && !IsTemp(rl_src2.reg.GetHighReg()))) {
special_case = false;
}
// Tuning: if rl_dest has been promoted and is *not* either operand, could use directly.
int res_lo = AllocTemp();
int res_hi;
- if (rl_src1.low_reg == rl_src2.low_reg) {
+ if (rl_src1.reg.GetReg() == rl_src2.reg.GetReg()) {
res_hi = AllocTemp();
- NewLIR3(kThumb2MulRRR, tmp1, rl_src1.low_reg, rl_src1.high_reg);
- NewLIR4(kThumb2Umull, res_lo, res_hi, rl_src1.low_reg, rl_src1.low_reg);
+ NewLIR3(kThumb2MulRRR, tmp1, rl_src1.reg.GetReg(), rl_src1.reg.GetHighReg());
+ NewLIR4(kThumb2Umull, res_lo, res_hi, rl_src1.reg.GetReg(), rl_src1.reg.GetReg());
OpRegRegRegShift(kOpAdd, res_hi, res_hi, tmp1, EncodeShift(kArmLsl, 1));
} else {
// In the special case, all temps are now allocated
- NewLIR3(kThumb2MulRRR, tmp1, rl_src2.low_reg, rl_src1.high_reg);
+ NewLIR3(kThumb2MulRRR, tmp1, rl_src2.reg.GetReg(), rl_src1.reg.GetHighReg());
if (special_case) {
- DCHECK_NE(rl_src1.low_reg, rl_src2.low_reg);
- DCHECK_NE(rl_src1.high_reg, rl_src2.high_reg);
- FreeTemp(rl_src1.high_reg);
+ DCHECK_NE(rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+ DCHECK_NE(rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
+ FreeTemp(rl_src1.reg.GetHighReg());
}
res_hi = AllocTemp();
- NewLIR4(kThumb2Umull, res_lo, res_hi, rl_src2.low_reg, rl_src1.low_reg);
- NewLIR4(kThumb2Mla, tmp1, rl_src1.low_reg, rl_src2.high_reg, tmp1);
+ NewLIR4(kThumb2Umull, res_lo, res_hi, rl_src2.reg.GetReg(), rl_src1.reg.GetReg());
+ NewLIR4(kThumb2Mla, tmp1, rl_src1.reg.GetReg(), rl_src2.reg.GetHighReg(), tmp1);
NewLIR4(kThumb2AddRRR, res_hi, tmp1, res_hi, 0);
if (special_case) {
- FreeTemp(rl_src1.low_reg);
- Clobber(rl_src1.low_reg);
- Clobber(rl_src1.high_reg);
+ FreeTemp(rl_src1.reg.GetReg());
+ Clobber(rl_src1.reg.GetReg());
+ Clobber(rl_src1.reg.GetHighReg());
}
}
FreeTemp(tmp1);
rl_result = GetReturnWide(false); // Just using as a template.
- rl_result.low_reg = res_lo;
- rl_result.high_reg = res_hi;
+ rl_result.reg.SetReg(res_lo);
+ rl_result.reg.SetHighReg(res_hi);
StoreValueWide(rl_dest, rl_result);
// Now, restore lr to its non-temp status.
Clobber(rARM_LR);
@@ -920,25 +938,25 @@ void ArmMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
}
/* null object? */
- GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);
+ GenNullCheck(rl_array.s_reg_low, rl_array.reg.GetReg(), opt_flags);
bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
int reg_len = INVALID_REG;
if (needs_range_check) {
reg_len = AllocTemp();
/* Get len */
- LoadWordDisp(rl_array.low_reg, len_offset, reg_len);
+ LoadWordDisp(rl_array.reg.GetReg(), len_offset, reg_len);
}
if (rl_dest.wide || rl_dest.fp || constant_index) {
int reg_ptr;
if (constant_index) {
- reg_ptr = rl_array.low_reg; // NOTE: must not alter reg_ptr in constant case.
+ reg_ptr = rl_array.reg.GetReg(); // NOTE: must not alter reg_ptr in constant case.
} else {
// No special indexed operation, lea + load w/ displacement
reg_ptr = AllocTemp();
- OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.low_reg, rl_index.low_reg,
+ OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg.GetReg(), rl_index.reg.GetReg(),
EncodeShift(kArmLsl, scale));
- FreeTemp(rl_index.low_reg);
+ FreeTemp(rl_index.reg.GetReg());
}
rl_result = EvalLoc(rl_dest, reg_class, true);
@@ -946,18 +964,18 @@ void ArmMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
if (constant_index) {
GenImmedCheck(kCondLs, reg_len, mir_graph_->ConstantValue(rl_index), kThrowConstantArrayBounds);
} else {
- GenRegRegCheck(kCondLs, reg_len, rl_index.low_reg, kThrowArrayBounds);
+ GenRegRegCheck(kCondLs, reg_len, rl_index.reg.GetReg(), kThrowArrayBounds);
}
FreeTemp(reg_len);
}
if (rl_dest.wide) {
- LoadBaseDispWide(reg_ptr, data_offset, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
+ LoadBaseDispWide(reg_ptr, data_offset, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), INVALID_SREG);
if (!constant_index) {
FreeTemp(reg_ptr);
}
StoreValueWide(rl_dest, rl_result);
} else {
- LoadBaseDisp(reg_ptr, data_offset, rl_result.low_reg, size, INVALID_SREG);
+ LoadBaseDisp(reg_ptr, data_offset, rl_result.reg.GetReg(), size, INVALID_SREG);
if (!constant_index) {
FreeTemp(reg_ptr);
}
@@ -966,15 +984,15 @@ void ArmMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
} else {
// Offset base, then use indexed load
int reg_ptr = AllocTemp();
- OpRegRegImm(kOpAdd, reg_ptr, rl_array.low_reg, data_offset);
- FreeTemp(rl_array.low_reg);
+ OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg.GetReg(), data_offset);
+ FreeTemp(rl_array.reg.GetReg());
rl_result = EvalLoc(rl_dest, reg_class, true);
if (needs_range_check) {
- GenRegRegCheck(kCondUge, rl_index.low_reg, reg_len, kThrowArrayBounds);
+ GenRegRegCheck(kCondUge, rl_index.reg.GetReg(), reg_len, kThrowArrayBounds);
FreeTemp(reg_len);
}
- LoadBaseIndexed(reg_ptr, rl_index.low_reg, rl_result.low_reg, scale, size);
+ LoadBaseIndexed(reg_ptr, rl_index.reg.GetReg(), rl_result.reg.GetReg(), scale, size);
FreeTemp(reg_ptr);
StoreValue(rl_dest, rl_result);
}
@@ -1010,17 +1028,17 @@ void ArmMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
int reg_ptr;
bool allocated_reg_ptr_temp = false;
if (constant_index) {
- reg_ptr = rl_array.low_reg;
- } else if (IsTemp(rl_array.low_reg) && !card_mark) {
- Clobber(rl_array.low_reg);
- reg_ptr = rl_array.low_reg;
+ reg_ptr = rl_array.reg.GetReg();
+ } else if (IsTemp(rl_array.reg.GetReg()) && !card_mark) {
+ Clobber(rl_array.reg.GetReg());
+ reg_ptr = rl_array.reg.GetReg();
} else {
allocated_reg_ptr_temp = true;
reg_ptr = AllocTemp();
}
/* null object? */
- GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);
+ GenNullCheck(rl_array.s_reg_low, rl_array.reg.GetReg(), opt_flags);
bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
int reg_len = INVALID_REG;
@@ -1028,7 +1046,7 @@ void ArmMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
reg_len = AllocTemp();
// NOTE: max live temps (4) here.
/* Get len */
- LoadWordDisp(rl_array.low_reg, len_offset, reg_len);
+ LoadWordDisp(rl_array.reg.GetReg(), len_offset, reg_len);
}
/* at this point, reg_ptr points to array, 2 live temps */
if (rl_src.wide || rl_src.fp || constant_index) {
@@ -1038,39 +1056,39 @@ void ArmMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
rl_src = LoadValue(rl_src, reg_class);
}
if (!constant_index) {
- OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.low_reg, rl_index.low_reg,
+ OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg.GetReg(), rl_index.reg.GetReg(),
EncodeShift(kArmLsl, scale));
}
if (needs_range_check) {
if (constant_index) {
GenImmedCheck(kCondLs, reg_len, mir_graph_->ConstantValue(rl_index), kThrowConstantArrayBounds);
} else {
- GenRegRegCheck(kCondLs, reg_len, rl_index.low_reg, kThrowArrayBounds);
+ GenRegRegCheck(kCondLs, reg_len, rl_index.reg.GetReg(), kThrowArrayBounds);
}
FreeTemp(reg_len);
}
if (rl_src.wide) {
- StoreBaseDispWide(reg_ptr, data_offset, rl_src.low_reg, rl_src.high_reg);
+ StoreBaseDispWide(reg_ptr, data_offset, rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
} else {
- StoreBaseDisp(reg_ptr, data_offset, rl_src.low_reg, size);
+ StoreBaseDisp(reg_ptr, data_offset, rl_src.reg.GetReg(), size);
}
} else {
/* reg_ptr -> array data */
- OpRegRegImm(kOpAdd, reg_ptr, rl_array.low_reg, data_offset);
+ OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg.GetReg(), data_offset);
rl_src = LoadValue(rl_src, reg_class);
if (needs_range_check) {
- GenRegRegCheck(kCondUge, rl_index.low_reg, reg_len, kThrowArrayBounds);
+ GenRegRegCheck(kCondUge, rl_index.reg.GetReg(), reg_len, kThrowArrayBounds);
FreeTemp(reg_len);
}
- StoreBaseIndexed(reg_ptr, rl_index.low_reg, rl_src.low_reg,
+ StoreBaseIndexed(reg_ptr, rl_index.reg.GetReg(), rl_src.reg.GetReg(),
scale, size);
}
if (allocated_reg_ptr_temp) {
FreeTemp(reg_ptr);
}
if (card_mark) {
- MarkGCCard(rl_src.low_reg, rl_array.low_reg);
+ MarkGCCard(rl_src.reg.GetReg(), rl_array.reg.GetReg());
}
}
@@ -1093,53 +1111,53 @@ void ArmMir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
case Instruction::SHL_LONG:
case Instruction::SHL_LONG_2ADDR:
if (shift_amount == 1) {
- OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, rl_src.low_reg);
- OpRegRegReg(kOpAdc, rl_result.high_reg, rl_src.high_reg, rl_src.high_reg);
+ OpRegRegReg(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), rl_src.reg.GetReg());
+ OpRegRegReg(kOpAdc, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), rl_src.reg.GetHighReg());
} else if (shift_amount == 32) {
- OpRegCopy(rl_result.high_reg, rl_src.low_reg);
- LoadConstant(rl_result.low_reg, 0);
+ OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetReg());
+ LoadConstant(rl_result.reg.GetReg(), 0);
} else if (shift_amount > 31) {
- OpRegRegImm(kOpLsl, rl_result.high_reg, rl_src.low_reg, shift_amount - 32);
- LoadConstant(rl_result.low_reg, 0);
+ OpRegRegImm(kOpLsl, rl_result.reg.GetHighReg(), rl_src.reg.GetReg(), shift_amount - 32);
+ LoadConstant(rl_result.reg.GetReg(), 0);
} else {
- OpRegRegImm(kOpLsl, rl_result.high_reg, rl_src.high_reg, shift_amount);
- OpRegRegRegShift(kOpOr, rl_result.high_reg, rl_result.high_reg, rl_src.low_reg,
+ OpRegRegImm(kOpLsl, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), shift_amount);
+ OpRegRegRegShift(kOpOr, rl_result.reg.GetHighReg(), rl_result.reg.GetHighReg(), rl_src.reg.GetReg(),
EncodeShift(kArmLsr, 32 - shift_amount));
- OpRegRegImm(kOpLsl, rl_result.low_reg, rl_src.low_reg, shift_amount);
+ OpRegRegImm(kOpLsl, rl_result.reg.GetReg(), rl_src.reg.GetReg(), shift_amount);
}
break;
case Instruction::SHR_LONG:
case Instruction::SHR_LONG_2ADDR:
if (shift_amount == 32) {
- OpRegCopy(rl_result.low_reg, rl_src.high_reg);
- OpRegRegImm(kOpAsr, rl_result.high_reg, rl_src.high_reg, 31);
+ OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetHighReg());
+ OpRegRegImm(kOpAsr, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), 31);
} else if (shift_amount > 31) {
- OpRegRegImm(kOpAsr, rl_result.low_reg, rl_src.high_reg, shift_amount - 32);
- OpRegRegImm(kOpAsr, rl_result.high_reg, rl_src.high_reg, 31);
+ OpRegRegImm(kOpAsr, rl_result.reg.GetReg(), rl_src.reg.GetHighReg(), shift_amount - 32);
+ OpRegRegImm(kOpAsr, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), 31);
} else {
int t_reg = AllocTemp();
- OpRegRegImm(kOpLsr, t_reg, rl_src.low_reg, shift_amount);
- OpRegRegRegShift(kOpOr, rl_result.low_reg, t_reg, rl_src.high_reg,
+ OpRegRegImm(kOpLsr, t_reg, rl_src.reg.GetReg(), shift_amount);
+ OpRegRegRegShift(kOpOr, rl_result.reg.GetReg(), t_reg, rl_src.reg.GetHighReg(),
EncodeShift(kArmLsl, 32 - shift_amount));
FreeTemp(t_reg);
- OpRegRegImm(kOpAsr, rl_result.high_reg, rl_src.high_reg, shift_amount);
+ OpRegRegImm(kOpAsr, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), shift_amount);
}
break;
case Instruction::USHR_LONG:
case Instruction::USHR_LONG_2ADDR:
if (shift_amount == 32) {
- OpRegCopy(rl_result.low_reg, rl_src.high_reg);
- LoadConstant(rl_result.high_reg, 0);
+ OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetHighReg());
+ LoadConstant(rl_result.reg.GetHighReg(), 0);
} else if (shift_amount > 31) {
- OpRegRegImm(kOpLsr, rl_result.low_reg, rl_src.high_reg, shift_amount - 32);
- LoadConstant(rl_result.high_reg, 0);
+ OpRegRegImm(kOpLsr, rl_result.reg.GetReg(), rl_src.reg.GetHighReg(), shift_amount - 32);
+ LoadConstant(rl_result.reg.GetHighReg(), 0);
} else {
int t_reg = AllocTemp();
- OpRegRegImm(kOpLsr, t_reg, rl_src.low_reg, shift_amount);
- OpRegRegRegShift(kOpOr, rl_result.low_reg, t_reg, rl_src.high_reg,
+ OpRegRegImm(kOpLsr, t_reg, rl_src.reg.GetReg(), shift_amount);
+ OpRegRegRegShift(kOpOr, rl_result.reg.GetReg(), t_reg, rl_src.reg.GetHighReg(),
EncodeShift(kArmLsl, 32 - shift_amount));
FreeTemp(t_reg);
- OpRegRegImm(kOpLsr, rl_result.high_reg, rl_src.high_reg, shift_amount);
+ OpRegRegImm(kOpLsr, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), shift_amount);
}
break;
default:
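Each long-shift case above splits into three regimes: shift == 32 is a plain word move, shift > 31 is a single shift of the other word, and 0 < shift < 32 shifts both words and ORs in the bits that cross the word boundary. The general SHL regime in C++:

    #include <cassert>
    #include <cstdint>

    uint64_t ShlLong(uint32_t lo, uint32_t hi, int shift_amount) {
      assert(shift_amount > 0 && shift_amount < 32);  // The general regime above.
      uint32_t out_hi = (hi << shift_amount) |        // kOpLsl on the high word,
                        (lo >> (32 - shift_amount));  // orr'd with the low-word spill.
      uint32_t out_lo = lo << shift_amount;           // kOpLsl on the low word.
      return (static_cast<uint64_t>(out_hi) << 32) | out_lo;
    }

    int main() {
      uint64_t v = 0x0000000180000001ULL;
      assert(ShlLong(static_cast<uint32_t>(v), static_cast<uint32_t>(v >> 32), 1) == (v << 1));
      return 0;
    }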
@@ -1194,36 +1212,36 @@ void ArmMir2Lir::GenArithImmOpLong(Instruction::Code opcode,
switch (opcode) {
case Instruction::ADD_LONG:
case Instruction::ADD_LONG_2ADDR:
- NewLIR3(kThumb2AddRRI8M, rl_result.low_reg, rl_src1.low_reg, mod_imm_lo);
- NewLIR3(kThumb2AdcRRI8M, rl_result.high_reg, rl_src1.high_reg, mod_imm_hi);
+ NewLIR3(kThumb2AddRRI8M, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), mod_imm_lo);
+ NewLIR3(kThumb2AdcRRI8M, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), mod_imm_hi);
break;
case Instruction::OR_LONG:
case Instruction::OR_LONG_2ADDR:
- if ((val_lo != 0) || (rl_result.low_reg != rl_src1.low_reg)) {
- OpRegRegImm(kOpOr, rl_result.low_reg, rl_src1.low_reg, val_lo);
+ if ((val_lo != 0) || (rl_result.reg.GetReg() != rl_src1.reg.GetReg())) {
+ OpRegRegImm(kOpOr, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), val_lo);
}
- if ((val_hi != 0) || (rl_result.high_reg != rl_src1.high_reg)) {
- OpRegRegImm(kOpOr, rl_result.high_reg, rl_src1.high_reg, val_hi);
+ if ((val_hi != 0) || (rl_result.reg.GetHighReg() != rl_src1.reg.GetHighReg())) {
+ OpRegRegImm(kOpOr, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), val_hi);
}
break;
case Instruction::XOR_LONG:
case Instruction::XOR_LONG_2ADDR:
- OpRegRegImm(kOpXor, rl_result.low_reg, rl_src1.low_reg, val_lo);
- OpRegRegImm(kOpXor, rl_result.high_reg, rl_src1.high_reg, val_hi);
+ OpRegRegImm(kOpXor, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), val_lo);
+ OpRegRegImm(kOpXor, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), val_hi);
break;
case Instruction::AND_LONG:
case Instruction::AND_LONG_2ADDR:
- if ((val_lo != 0xffffffff) || (rl_result.low_reg != rl_src1.low_reg)) {
- OpRegRegImm(kOpAnd, rl_result.low_reg, rl_src1.low_reg, val_lo);
+ if ((val_lo != 0xffffffff) || (rl_result.reg.GetReg() != rl_src1.reg.GetReg())) {
+ OpRegRegImm(kOpAnd, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), val_lo);
}
- if ((val_hi != 0xffffffff) || (rl_result.high_reg != rl_src1.high_reg)) {
- OpRegRegImm(kOpAnd, rl_result.high_reg, rl_src1.high_reg, val_hi);
+ if ((val_hi != 0xffffffff) || (rl_result.reg.GetHighReg() != rl_src1.reg.GetHighReg())) {
+ OpRegRegImm(kOpAnd, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), val_hi);
}
break;
case Instruction::SUB_LONG_2ADDR:
case Instruction::SUB_LONG:
- NewLIR3(kThumb2SubRRI8M, rl_result.low_reg, rl_src1.low_reg, mod_imm_lo);
- NewLIR3(kThumb2SbcRRI8M, rl_result.high_reg, rl_src1.high_reg, mod_imm_hi);
+ NewLIR3(kThumb2SubRRI8M, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), mod_imm_lo);
+ NewLIR3(kThumb2SbcRRI8M, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), mod_imm_hi);
break;
default:
LOG(FATAL) << "Unexpected opcode " << opcode;
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index 83431ad235..ab1a053489 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -37,23 +37,19 @@ static int fp_temps[] = {fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15};
RegLocation ArmMir2Lir::LocCReturn() {
- RegLocation res = ARM_LOC_C_RETURN;
- return res;
+ return arm_loc_c_return;
}
RegLocation ArmMir2Lir::LocCReturnWide() {
- RegLocation res = ARM_LOC_C_RETURN_WIDE;
- return res;
+ return arm_loc_c_return_wide;
}
RegLocation ArmMir2Lir::LocCReturnFloat() {
- RegLocation res = ARM_LOC_C_RETURN_FLOAT;
- return res;
+ return arm_loc_c_return_float;
}
RegLocation ArmMir2Lir::LocCReturnDouble() {
- RegLocation res = ARM_LOC_C_RETURN_DOUBLE;
- return res;
+ return arm_loc_c_return_double;
}
// Return a target-dependent special register.
@@ -530,14 +526,10 @@ Mir2Lir* ArmCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
return new ArmMir2Lir(cu, mir_graph, arena);
}
-/*
- * Alloc a pair of core registers, or a double. Low reg in low byte,
- * high reg in next byte.
- */
-int ArmMir2Lir::AllocTypedTempPair(bool fp_hint, int reg_class) {
+// Alloc a pair of core registers, or a double.
+RegStorage ArmMir2Lir::AllocTypedTempWide(bool fp_hint, int reg_class) {
int high_reg;
int low_reg;
- int res = 0;
if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
low_reg = AllocTempDouble();
@@ -546,8 +538,7 @@ int ArmMir2Lir::AllocTypedTempPair(bool fp_hint, int reg_class) {
low_reg = AllocTemp();
high_reg = AllocTemp();
}
- res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
- return res;
+ return RegStorage(RegStorage::k64BitPair, low_reg, high_reg);
}
int ArmMir2Lir::AllocTypedTemp(bool fp_hint, int reg_class) {
@@ -594,11 +585,11 @@ void ArmMir2Lir::CompilerInitializeRegAlloc() {
void ArmMir2Lir::FreeRegLocTemps(RegLocation rl_keep,
RegLocation rl_free) {
- if ((rl_free.low_reg != rl_keep.low_reg) && (rl_free.low_reg != rl_keep.high_reg) &&
- (rl_free.high_reg != rl_keep.low_reg) && (rl_free.high_reg != rl_keep.high_reg)) {
+ if ((rl_free.reg.GetReg() != rl_keep.reg.GetReg()) && (rl_free.reg.GetReg() != rl_keep.reg.GetHighReg()) &&
+ (rl_free.reg.GetHighReg() != rl_keep.reg.GetReg()) && (rl_free.reg.GetHighReg() != rl_keep.reg.GetHighReg())) {
// No overlap, free both
- FreeTemp(rl_free.low_reg);
- FreeTemp(rl_free.high_reg);
+ FreeTemp(rl_free.reg.GetReg());
+ FreeTemp(rl_free.reg.GetHighReg());
}
}
/*
@@ -697,19 +688,19 @@ void ArmMir2Lir::ClobberCallerSave() {
RegLocation ArmMir2Lir::GetReturnWideAlt() {
RegLocation res = LocCReturnWide();
- res.low_reg = r2;
- res.high_reg = r3;
+ res.reg.SetReg(r2);
+ res.reg.SetHighReg(r3);
Clobber(r2);
Clobber(r3);
MarkInUse(r2);
MarkInUse(r3);
- MarkPair(res.low_reg, res.high_reg);
+ MarkPair(res.reg.GetReg(), res.reg.GetHighReg());
return res;
}
RegLocation ArmMir2Lir::GetReturnAlt() {
RegLocation res = LocCReturn();
- res.low_reg = r1;
+ res.reg.SetReg(r1);
Clobber(r1);
MarkInUse(r1);
return res;
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 9d3968bff2..c2cfb4dbc1 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -499,12 +499,6 @@ LIR* ArmMir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src1, int value) {
else
opcode = (neg) ? kThumbAddRRI3 : kThumbSubRRI3;
return NewLIR3(opcode, r_dest, r_src1, abs_value);
- } else if ((abs_value & 0x3ff) == abs_value) {
- if (op == kOpAdd)
- opcode = (neg) ? kThumb2SubRRI12 : kThumb2AddRRI12;
- else
- opcode = (neg) ? kThumb2AddRRI12 : kThumb2SubRRI12;
- return NewLIR3(opcode, r_dest, r_src1, abs_value);
}
if (mod_imm < 0) {
mod_imm = ModifiedImmediate(-value);
@@ -512,6 +506,15 @@ LIR* ArmMir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src1, int value) {
op = (op == kOpAdd) ? kOpSub : kOpAdd;
}
}
+ if (mod_imm < 0 && (abs_value & 0x3ff) == abs_value) {
+ // Deliberately used only when no modified-immediate encoding exists, since we
+ // sometimes rely on the flags being set for small values, and not necessarily in low regs.
+ if (op == kOpAdd)
+ opcode = (neg) ? kThumb2SubRRI12 : kThumb2AddRRI12;
+ else
+ opcode = (neg) ? kThumb2AddRRI12 : kThumb2SubRRI12;
+ return NewLIR3(opcode, r_dest, r_src1, abs_value);
+ }
if (op == kOpSub) {
opcode = kThumb2SubRRI8M;
alt_opcode = kThumb2SubRRR;
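The hunk above reorders encoding selection in OpRegRegImm: the 12-bit ADDW/SUBW form, which can never set flags, is now only a fallback for values with no Thumb2 modified-immediate encoding. The resulting decision order in outline, where ModifiedImmediate(v) < 0 means "no mod-imm encoding" as in utility_arm.cc (the real function also handles same-register and negated-immediate cases):

    enum AddSubEncoding { kImm3, kModImm8, kImm12, kRegReg };

    AddSubEncoding PickAddSubEncoding(int mod_imm, int abs_value, bool all_low_regs) {
      if (all_low_regs && (abs_value & 0x7) == abs_value) {
        return kImm3;     // 16-bit Thumb add/sub; sets flags.
      }
      if (mod_imm >= 0) {
        return kModImm8;  // Thumb2 modified immediate; has a flag-setting variant.
      }
      if ((abs_value & 0x3ff) == abs_value) {
        return kImm12;    // ADDW/SUBW: larger range, but never sets flags.
      }
      return kRegReg;     // Materialize the constant in a temp register.
    }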
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 31854496ab..14469b61c3 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -78,11 +78,6 @@ void Mir2Lir::MarkSafepointPC(LIR* inst) {
DCHECK_EQ(safepoint_pc->u.m.def_mask, ENCODE_ALL);
}
-bool Mir2Lir::FastInstance(uint32_t field_idx, bool is_put, int* field_offset, bool* is_volatile) {
- return cu_->compiler_driver->ComputeInstanceFieldInfo(
- field_idx, mir_graph_->GetCurrentDexCompilationUnit(), is_put, field_offset, is_volatile);
-}
-
/* Remove a LIR from the list. */
void Mir2Lir::UnlinkLIR(LIR* lir) {
if (UNLIKELY(lir == first_lir_insn_)) {
@@ -379,21 +374,21 @@ LIR* Mir2Lir::AddWideData(LIR* *constant_list_p, int val_lo, int val_hi) {
return AddWordData(constant_list_p, val_lo);
}
-static void PushWord(std::vector<uint8_t>&buf, int data) {
+static void Push32(std::vector<uint8_t>&buf, int data) {
buf.push_back(data & 0xff);
buf.push_back((data >> 8) & 0xff);
buf.push_back((data >> 16) & 0xff);
buf.push_back((data >> 24) & 0xff);
}
-// Push 8 bytes on 64-bit systems; 4 on 32-bit systems.
-static void PushPointer(std::vector<uint8_t>&buf, void const* pointer) {
- uintptr_t data = reinterpret_cast<uintptr_t>(pointer);
- if (sizeof(void*) == sizeof(uint64_t)) {
- PushWord(buf, (data >> (sizeof(void*) * 4)) & 0xFFFFFFFF);
- PushWord(buf, data & 0xFFFFFFFF);
+// Push 8 bytes on 64-bit target systems; 4 on 32-bit target systems.
+static void PushPointer(std::vector<uint8_t>&buf, const void* pointer, bool target64) {
+ uint64_t data = reinterpret_cast<uintptr_t>(pointer);
+ if (target64) {
+ Push32(buf, data & 0xFFFFFFFF);
+ Push32(buf, (data >> 32) & 0xFFFFFFFF);
} else {
- PushWord(buf, data);
+ Push32(buf, static_cast<uint32_t>(data));
}
}
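The rewritten PushPointer fixes two host/target confusions in the deleted version: it keyed the width off the host's sizeof(void*) instead of the compilation target, and on 64-bit it emitted the high word first. A runnable check of the corrected little-endian order (Push32 duplicated so the snippet stands alone):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    static void Push32(std::vector<uint8_t>& buf, uint32_t data) {
      buf.push_back(data & 0xff);
      buf.push_back((data >> 8) & 0xff);
      buf.push_back((data >> 16) & 0xff);
      buf.push_back((data >> 24) & 0xff);
    }

    int main() {
      std::vector<uint8_t> buf;
      uint64_t data = 0x1122334455667788ULL;
      // 64-bit target: low 32 bits first, then high 32 bits.
      Push32(buf, static_cast<uint32_t>(data & 0xFFFFFFFF));
      Push32(buf, static_cast<uint32_t>(data >> 32));
      assert(buf[0] == 0x88 && buf[3] == 0x55);  // low word, little-endian
      assert(buf[4] == 0x44 && buf[7] == 0x11);  // high word follows
      return 0;
    }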
@@ -408,7 +403,7 @@ void Mir2Lir::InstallLiteralPools() {
AlignBuffer(code_buffer_, data_offset_);
LIR* data_lir = literal_list_;
while (data_lir != NULL) {
- PushWord(code_buffer_, data_lir->operands[0]);
+ Push32(code_buffer_, data_lir->operands[0]);
data_lir = NEXT_LIR(data_lir);
}
// Push code and method literals, record offsets for the compiler to patch.
@@ -424,7 +419,7 @@ void Mir2Lir::InstallLiteralPools() {
code_buffer_.size());
const DexFile::MethodId& id = cu_->dex_file->GetMethodId(target);
// unique value based on target to ensure code deduplication works
- PushPointer(code_buffer_, &id);
+ PushPointer(code_buffer_, &id, cu_->target64);
data_lir = NEXT_LIR(data_lir);
}
data_lir = method_literal_list_;
@@ -439,7 +434,7 @@ void Mir2Lir::InstallLiteralPools() {
code_buffer_.size());
const DexFile::MethodId& id = cu_->dex_file->GetMethodId(target);
// unique value based on target to ensure code deduplication works
- PushPointer(code_buffer_, &id);
+ PushPointer(code_buffer_, &id, cu_->target64);
data_lir = NEXT_LIR(data_lir);
}
// Push class literals.
@@ -453,7 +448,7 @@ void Mir2Lir::InstallLiteralPools() {
code_buffer_.size());
const DexFile::TypeId& id = cu_->dex_file->GetTypeId(target);
// unique value based on target to ensure code deduplication works
- PushPointer(code_buffer_, &id);
+ PushPointer(code_buffer_, &id, cu_->target64);
data_lir = NEXT_LIR(data_lir);
}
}
@@ -497,8 +492,8 @@ void Mir2Lir::InstallSwitchTables() {
<< std::hex << keys[elems] << ", disp: 0x"
<< std::hex << disp;
}
- PushWord(code_buffer_, keys[elems]);
- PushWord(code_buffer_,
+ Push32(code_buffer_, keys[elems]);
+ Push32(code_buffer_,
tab_rec->targets[elems]->offset - bx_offset);
}
} else {
@@ -510,7 +505,7 @@ void Mir2Lir::InstallSwitchTables() {
LOG(INFO) << " Case[" << elems << "] disp: 0x"
<< std::hex << disp;
}
- PushWord(code_buffer_, tab_rec->targets[elems]->offset - bx_offset);
+ Push32(code_buffer_, tab_rec->targets[elems]->offset - bx_offset);
}
}
}
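For orientation, the sparse-switch loop above interleaves (key, displacement) pairs into the code buffer, with displacements measured from the switch's base branch instruction. A standalone sketch of that payload layout (names illustrative; Push32 repeated so the snippet compiles on its own):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    static void Push32(std::vector<uint8_t>& buf, uint32_t data) {
      for (int shift = 0; shift < 32; shift += 8) {
        buf.push_back((data >> shift) & 0xff);
      }
    }

    // Each sparse-switch entry is a case key followed by the branch
    // displacement relative to the switch's base instruction (bx_offset).
    static void EmitSparseSwitchPayload(std::vector<uint8_t>& code,
                                        const std::vector<int32_t>& keys,
                                        const std::vector<int32_t>& target_offsets,
                                        int32_t bx_offset) {
      for (size_t i = 0; i < keys.size(); ++i) {
        Push32(code, static_cast<uint32_t>(keys[i]));
        Push32(code, static_cast<uint32_t>(target_offsets[i] - bx_offset));
      }
    }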
@@ -972,6 +967,22 @@ ConditionCode Mir2Lir::FlipComparisonOrder(ConditionCode before) {
return res;
}
+ConditionCode Mir2Lir::NegateComparison(ConditionCode before) {
+ ConditionCode res;
+ switch (before) {
+ case kCondEq: res = kCondNe; break;
+ case kCondNe: res = kCondEq; break;
+ case kCondLt: res = kCondGe; break;
+ case kCondGt: res = kCondLe; break;
+ case kCondLe: res = kCondGt; break;
+ case kCondGe: res = kCondLt; break;
+ default:
+ res = static_cast<ConditionCode>(0);
+ LOG(FATAL) << "Unexpected ccode " << before;
+ }
+ return res;
+}
+
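NegateComparison inverts the sense of a test while keeping the operands in place, which is a different transform from the pre-existing FlipComparisonOrder that compensates for swapped operands. A small sketch contrasting the two tables (enum abbreviated from the diff):

    // kCondLt negates to kCondGe (same operands, opposite outcome), while the
    // operand-swap counterpart of kCondLt is kCondGt.
    enum Cond { kEq, kNe, kLt, kGt, kLe, kGe };

    static Cond Negate(Cond c) {  // mirrors NegateComparison above
      switch (c) {
        case kEq: return kNe;
        case kNe: return kEq;
        case kLt: return kGe;
        case kGt: return kLe;
        case kLe: return kGt;
        case kGe: return kLt;
      }
      return kEq;  // unreachable
    }

    static Cond SwapOperands(Cond c) {  // mirrors FlipComparisonOrder
      switch (c) {
        case kLt: return kGt;
        case kGt: return kLt;
        case kLe: return kGe;
        case kGe: return kLe;
        default:  return c;  // Eq and Ne are symmetric
      }
    }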
// TODO: move to mir_to_lir.cc
Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
: Backend(arena),
@@ -1070,10 +1081,12 @@ CompiledMethod* Mir2Lir::GetCompiledMethod() {
DCHECK_EQ(fp_vmap_table_.size(), 0u);
vmap_encoder.PushBackUnsigned(0u); // Size is 0.
}
+
+ UniquePtr<std::vector<uint8_t> > cfi_info(ReturnCallFrameInformation());
CompiledMethod* result =
new CompiledMethod(*cu_->compiler_driver, cu_->instruction_set, code_buffer_, frame_size_,
core_spill_mask_, fp_spill_mask_, encoded_mapping_table_,
- vmap_encoder.GetData(), native_gc_map_);
+ vmap_encoder.GetData(), native_gc_map_, cfi_info.get());
return result;
}
@@ -1216,4 +1229,9 @@ void Mir2Lir::LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_re
AppendLIR(load_pc_rel);
}
+std::vector<uint8_t>* Mir2Lir::ReturnCallFrameInformation() {
+ // Default case is to do nothing.
+ return nullptr;
+}
+
} // namespace art
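The new ReturnCallFrameInformation hook defaults to "no CFI", and GetCompiledMethod above threads whatever a backend returns into the CompiledMethod. A hypothetical override shape (SketchBackend and the DWARF detail are invented for illustration; only the nullptr default is confirmed by this diff):

    #include <cstdint>
    #include <vector>

    class SketchBackend {
     public:
      virtual ~SketchBackend() {}
      // Default: no call-frame information; callers treat nullptr as "none".
      virtual std::vector<uint8_t>* ReturnCallFrameInformation() { return nullptr; }
    };

    class SketchX86Backend : public SketchBackend {
     public:
      std::vector<uint8_t>* ReturnCallFrameInformation() override {
        std::vector<uint8_t>* cfi = new std::vector<uint8_t>();
        // ... append DWARF CFI opcodes recorded while emitting the prologue ...
        return cfi;  // ownership passes to the caller (UniquePtr in the diff)
      }
    };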
diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h
index 3dcb964fab..b4d8dd6009 100644
--- a/compiler/dex/quick/dex_file_method_inliner.h
+++ b/compiler/dex/quick/dex_file_method_inliner.h
@@ -31,7 +31,7 @@ namespace verifier {
class MethodVerifier;
} // namespace verifier
-class CallInfo;
+struct CallInfo;
class Mir2Lir;
enum InlineMethodOpcode : uint16_t {
@@ -61,6 +61,7 @@ enum InlineMethodOpcode : uint16_t {
kInlineOpIGet,
kInlineOpIPut,
};
+std::ostream& operator<<(std::ostream& os, const InlineMethodOpcode& rhs);
enum InlineMethodFlags : uint16_t {
kNoInlineMethodFlags = 0x0000,
@@ -78,13 +79,13 @@ enum IntrinsicFlags {
// kIntrinsicIsEmptyOrLength
kIntrinsicFlagLength = kIntrinsicFlagNone,
- kIntrinsicFlagIsEmpty = 1,
+ kIntrinsicFlagIsEmpty = kIntrinsicFlagMin,
// kIntrinsicIndexOf
- kIntrinsicFlagBase0 = 1,
+ kIntrinsicFlagBase0 = kIntrinsicFlagMin,
// kIntrinsicUnsafeGet, kIntrinsicUnsafePut, kIntrinsicUnsafeCas
- kIntrinsicFlagIsLong = 1,
+ kIntrinsicFlagIsLong = kIntrinsicFlagMin,
// kIntrinsicUnsafeGet, kIntrinsicUnsafePut
kIntrinsicFlagIsVolatile = 2,
// kIntrinsicUnsafePut, kIntrinsicUnsafeCas
@@ -187,7 +188,6 @@ class DexFileMethodInliner {
*/
bool GenSpecial(Mir2Lir* backend, uint32_t method_idx);
- private:
/**
* To avoid multiple lookups of a class by its descriptor, we cache its
* type index in the IndexCache. These are the indexes into the IndexCache
@@ -311,6 +311,7 @@ class DexFileMethodInliner {
kProtoCacheLast
};
+ private:
/**
* The maximum number of method parameters we support in the ProtoDef.
*/
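Replacing the literal 1 with kIntrinsicFlagMin ties the per-intrinsic flags to a single definition of the first free bit. A sketch of the packing, with bit positions inferred from the values visible in this hunk and names shortened:

    #include <cstdint>

    // Different intrinsics reuse the same low bits of one shared flag word.
    enum IntrinsicFlagsSketch : uint16_t {
      kFlagNone       = 0x0000,
      kFlagMin        = 0x0001,        // first free bit
      kFlagIsLong     = kFlagMin,      // bit 0 (UnsafeGet/Put/Cas)
      kFlagIsVolatile = kFlagMin << 1  // bit 1, i.e. the literal 2 above
    };

    inline bool IsLongVariant(uint16_t flags) {
      return (flags & kFlagIsLong) != 0;
    }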
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 0533fbfcd7..00c51d40d3 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -128,12 +128,12 @@ void Mir2Lir::GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
if ((rl_temp.location == kLocDalvikFrame) &&
InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src2))) {
// OK - convert this to a compare immediate and branch
- OpCmpImmBranch(cond, rl_src1.low_reg, mir_graph_->ConstantValue(rl_src2), taken);
+ OpCmpImmBranch(cond, rl_src1.reg.GetReg(), mir_graph_->ConstantValue(rl_src2), taken);
return;
}
}
rl_src2 = LoadValue(rl_src2, kCoreReg);
- OpCmpBranch(cond, rl_src1.low_reg, rl_src2.low_reg, taken);
+ OpCmpBranch(cond, rl_src1.reg.GetReg(), rl_src2.reg.GetReg(), taken);
}
void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken,
@@ -163,17 +163,17 @@ void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_s
cond = static_cast<ConditionCode>(0);
LOG(FATAL) << "Unexpected opcode " << opcode;
}
- OpCmpImmBranch(cond, rl_src.low_reg, 0, taken);
+ OpCmpImmBranch(cond, rl_src.reg.GetReg(), 0, taken);
}
void Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (rl_src.location == kLocPhysReg) {
- OpRegCopy(rl_result.low_reg, rl_src.low_reg);
+ OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetReg());
} else {
- LoadValueDirect(rl_src, rl_result.low_reg);
+ LoadValueDirect(rl_src, rl_result.reg.GetReg());
}
- OpRegRegImm(kOpAsr, rl_result.high_reg, rl_result.low_reg, 31);
+ OpRegRegImm(kOpAsr, rl_result.reg.GetHighReg(), rl_result.reg.GetReg(), 31);
StoreValueWide(rl_dest, rl_result);
}
@@ -195,7 +195,7 @@ void Mir2Lir::GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
default:
LOG(ERROR) << "Bad int conversion type";
}
- OpRegReg(op, rl_result.low_reg, rl_src.low_reg);
+ OpRegReg(op, rl_result.reg.GetReg(), rl_src.reg.GetReg());
StoreValue(rl_dest, rl_result);
}
@@ -290,7 +290,7 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) {
RegLocation loc = UpdateLoc(info->args[i]);
if (loc.location == kLocPhysReg) {
StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low),
- loc.low_reg, kWord);
+ loc.reg.GetReg(), kWord);
}
}
/*
@@ -341,10 +341,10 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) {
RegLocation rl_arg = LoadValue(info->args[i], kCoreReg);
StoreBaseDisp(TargetReg(kRet0),
mirror::Array::DataOffset(component_size).Int32Value() +
- i * 4, rl_arg.low_reg, kWord);
+ i * 4, rl_arg.reg.GetReg(), kWord);
// If the LoadValue caused a temp to be allocated, free it
- if (IsTemp(rl_arg.low_reg)) {
- FreeTemp(rl_arg.low_reg);
+ if (IsTemp(rl_arg.reg.GetReg())) {
+ FreeTemp(rl_arg.reg.GetReg());
}
}
}
@@ -381,33 +381,27 @@ class StaticFieldSlowPath : public Mir2Lir::LIRSlowPath {
const int r_base_;
};
-void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_double,
+void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double,
bool is_object) {
- int field_offset;
- int storage_index;
- bool is_volatile;
- bool is_referrers_class;
- bool is_initialized;
- bool fast_path = cu_->compiler_driver->ComputeStaticFieldInfo(
- field_idx, mir_graph_->GetCurrentDexCompilationUnit(), true,
- &field_offset, &storage_index, &is_referrers_class, &is_volatile, &is_initialized);
- if (fast_path && !SLOW_FIELD_PATH) {
- DCHECK_GE(field_offset, 0);
+ const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
+ cu_->compiler_driver->ProcessedStaticField(field_info.FastPut(), field_info.IsReferrersClass());
+ if (field_info.FastPut() && !SLOW_FIELD_PATH) {
+ DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
int r_base;
- if (is_referrers_class) {
+ if (field_info.IsReferrersClass()) {
// Fast path, static storage base is this method's class
RegLocation rl_method = LoadCurrMethod();
r_base = AllocTemp();
- LoadWordDisp(rl_method.low_reg,
+ LoadWordDisp(rl_method.reg.GetReg(),
mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
- if (IsTemp(rl_method.low_reg)) {
- FreeTemp(rl_method.low_reg);
+ if (IsTemp(rl_method.reg.GetReg())) {
+ FreeTemp(rl_method.reg.GetReg());
}
} else {
// Medium path: static storage base is in a different class, which requires
// checking that the other class is initialized.
// TODO: remove initialized check now that we are initializing classes in the compiler driver.
- DCHECK_GE(storage_index, 0);
+ DCHECK_NE(field_info.StorageIndex(), DexFile::kDexNoIndex);
// May do runtime call so everything to home locations.
FlushAllRegs();
// Using fixed register to sync with possible call to runtime support.
@@ -420,9 +414,9 @@ void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_do
mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
r_base);
LoadWordDisp(r_base, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
- sizeof(int32_t*) * storage_index, r_base);
+ sizeof(int32_t*) * field_info.StorageIndex(), r_base);
// r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
- if (!is_initialized) {
+ if (!field_info.IsInitialized()) {
// Check if r_base is NULL or a not yet initialized class.
// The slow path is invoked if the r_base is NULL or the class pointed
@@ -437,7 +431,7 @@ void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_do
AddSlowPath(new (arena_) StaticFieldSlowPath(this,
unresolved_branch, uninit_branch, cont,
- storage_index, r_base));
+ field_info.StorageIndex(), r_base));
FreeTemp(r_tmp);
}
@@ -449,20 +443,20 @@ void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_do
} else {
rl_src = LoadValue(rl_src, kAnyReg);
}
- if (is_volatile) {
+ if (field_info.IsVolatile()) {
GenMemBarrier(kStoreStore);
}
if (is_long_or_double) {
- StoreBaseDispWide(r_base, field_offset, rl_src.low_reg,
- rl_src.high_reg);
+ StoreBaseDispWide(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg.GetReg(),
+ rl_src.reg.GetHighReg());
} else {
- StoreWordDisp(r_base, field_offset, rl_src.low_reg);
+ StoreWordDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg.GetReg());
}
- if (is_volatile) {
+ if (field_info.IsVolatile()) {
GenMemBarrier(kStoreLoad);
}
if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
- MarkGCCard(rl_src.low_reg, r_base);
+ MarkGCCard(rl_src.reg.GetReg(), r_base);
}
FreeTemp(r_base);
} else {
@@ -471,33 +465,27 @@ void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_do
is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Static)
: (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjStatic)
: QUICK_ENTRYPOINT_OFFSET(pSet32Static));
- CallRuntimeHelperImmRegLocation(setter_offset, field_idx, rl_src, true);
+ CallRuntimeHelperImmRegLocation(setter_offset, field_info.FieldIndex(), rl_src, true);
}
}
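GenSput now reads a MirSFieldLoweringInfo record resolved once during MIR analysis instead of querying the compiler driver at every codegen visit (the deleted ComputeStaticFieldInfo call). A sketch of that caching shape, with invented types patterned on the accessors used above:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // One record per sget/sput site, filled by an analysis pass and read-only
    // during code generation (mirroring FastPut()/FieldOffset()/etc. above).
    struct SFieldInfoSketch {
      uint32_t field_idx;      // field index in the dex file
      int32_t field_offset;    // byte offset within the storage, if resolved
      uint32_t storage_index;  // type index of the class holding the storage
      bool fast_put;
      bool is_volatile;
      bool is_referrers_class;
      bool is_initialized;
    };

    class MethodFieldCacheSketch {
     public:
      void Add(const SFieldInfoSketch& info) { infos_.push_back(info); }
      // Codegen indexes by a small id stored on the MIR instruction.
      const SFieldInfoSketch& Get(size_t lowering_info_idx) const {
        return infos_[lowering_info_idx];
      }

     private:
      std::vector<SFieldInfoSketch> infos_;
    };

Besides avoiding repeated lookups, the cached record lets ProcessedStaticField() report the outcome to the driver without recomputing the resolution.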
-void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest,
+void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest,
bool is_long_or_double, bool is_object) {
- int field_offset;
- int storage_index;
- bool is_volatile;
- bool is_referrers_class;
- bool is_initialized;
- bool fast_path = cu_->compiler_driver->ComputeStaticFieldInfo(
- field_idx, mir_graph_->GetCurrentDexCompilationUnit(), false,
- &field_offset, &storage_index, &is_referrers_class, &is_volatile, &is_initialized);
- if (fast_path && !SLOW_FIELD_PATH) {
- DCHECK_GE(field_offset, 0);
+ const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
+ cu_->compiler_driver->ProcessedStaticField(field_info.FastGet(), field_info.IsReferrersClass());
+ if (field_info.FastGet() && !SLOW_FIELD_PATH) {
+ DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
int r_base;
- if (is_referrers_class) {
+ if (field_info.IsReferrersClass()) {
// Fast path, static storage base is this method's class
RegLocation rl_method = LoadCurrMethod();
r_base = AllocTemp();
- LoadWordDisp(rl_method.low_reg,
+ LoadWordDisp(rl_method.reg.GetReg(),
mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
} else {
// Medium path: static storage base is in a different class, which requires
// checking that the other class is initialized.
- DCHECK_GE(storage_index, 0);
+ DCHECK_NE(field_info.StorageIndex(), DexFile::kDexNoIndex);
// May do runtime call so everything to home locations.
FlushAllRegs();
// Using fixed register to sync with possible call to runtime support.
@@ -510,9 +498,9 @@ void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest,
mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
r_base);
LoadWordDisp(r_base, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
- sizeof(int32_t*) * storage_index, r_base);
+ sizeof(int32_t*) * field_info.StorageIndex(), r_base);
// r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
- if (!is_initialized) {
+ if (!field_info.IsInitialized()) {
// Check if r_base is NULL or a not yet initialized class.
// The slow path is invoked if the r_base is NULL or the class pointed
@@ -527,7 +515,7 @@ void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest,
AddSlowPath(new (arena_) StaticFieldSlowPath(this,
unresolved_branch, uninit_branch, cont,
- storage_index, r_base));
+ field_info.StorageIndex(), r_base));
FreeTemp(r_tmp);
}
@@ -535,14 +523,14 @@ void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest,
}
// r_base now holds static storage base
RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
- if (is_volatile) {
+ if (field_info.IsVolatile()) {
GenMemBarrier(kLoadLoad);
}
if (is_long_or_double) {
- LoadBaseDispWide(r_base, field_offset, rl_result.low_reg,
- rl_result.high_reg, INVALID_SREG);
+ LoadBaseDispWide(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg.GetReg(),
+ rl_result.reg.GetHighReg(), INVALID_SREG);
} else {
- LoadWordDisp(r_base, field_offset, rl_result.low_reg);
+ LoadWordDisp(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg.GetReg());
}
FreeTemp(r_base);
if (is_long_or_double) {
@@ -556,7 +544,7 @@ void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest,
is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Static)
: (is_object ? QUICK_ENTRYPOINT_OFFSET(pGetObjStatic)
: QUICK_ENTRYPOINT_OFFSET(pGet32Static));
- CallRuntimeHelperImm(getterOffset, field_idx, true);
+ CallRuntimeHelperImm(getterOffset, field_info.FieldIndex(), true);
if (is_long_or_double) {
RegLocation rl_result = GetReturnWide(rl_dest.fp);
StoreValueWide(rl_dest, rl_result);
@@ -698,36 +686,34 @@ void Mir2Lir::HandleThrowLaunchPads() {
}
}
-void Mir2Lir::GenIGet(uint32_t field_idx, int opt_flags, OpSize size,
+void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size,
RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double,
bool is_object) {
- int field_offset;
- bool is_volatile;
-
- bool fast_path = FastInstance(field_idx, false, &field_offset, &is_volatile);
-
- if (fast_path && !SLOW_FIELD_PATH) {
+ const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
+ cu_->compiler_driver->ProcessedInstanceField(field_info.FastGet());
+ if (field_info.FastGet() && !SLOW_FIELD_PATH) {
RegLocation rl_result;
RegisterClass reg_class = oat_reg_class_by_size(size);
- DCHECK_GE(field_offset, 0);
+ DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
rl_obj = LoadValue(rl_obj, kCoreReg);
if (is_long_or_double) {
DCHECK(rl_dest.wide);
- GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
+ GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), opt_flags);
if (cu_->instruction_set == kX86) {
rl_result = EvalLoc(rl_dest, reg_class, true);
- GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
- LoadBaseDispWide(rl_obj.low_reg, field_offset, rl_result.low_reg,
- rl_result.high_reg, rl_obj.s_reg_low);
- if (is_volatile) {
+ GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), opt_flags);
+ LoadBaseDispWide(rl_obj.reg.GetReg(), field_info.FieldOffset().Int32Value(),
+ rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), rl_obj.s_reg_low);
+ if (field_info.IsVolatile()) {
GenMemBarrier(kLoadLoad);
}
} else {
int reg_ptr = AllocTemp();
- OpRegRegImm(kOpAdd, reg_ptr, rl_obj.low_reg, field_offset);
+ OpRegRegImm(kOpAdd, reg_ptr, rl_obj.reg.GetReg(), field_info.FieldOffset().Int32Value());
rl_result = EvalLoc(rl_dest, reg_class, true);
- LoadBaseDispWide(reg_ptr, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
- if (is_volatile) {
+ LoadBaseDispWide(reg_ptr, 0, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(),
+ INVALID_SREG);
+ if (field_info.IsVolatile()) {
GenMemBarrier(kLoadLoad);
}
FreeTemp(reg_ptr);
@@ -735,10 +721,10 @@ void Mir2Lir::GenIGet(uint32_t field_idx, int opt_flags, OpSize size,
StoreValueWide(rl_dest, rl_result);
} else {
rl_result = EvalLoc(rl_dest, reg_class, true);
- GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
- LoadBaseDisp(rl_obj.low_reg, field_offset, rl_result.low_reg,
- kWord, rl_obj.s_reg_low);
- if (is_volatile) {
+ GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), opt_flags);
+ LoadBaseDisp(rl_obj.reg.GetReg(), field_info.FieldOffset().Int32Value(),
+ rl_result.reg.GetReg(), kWord, rl_obj.s_reg_low);
+ if (field_info.IsVolatile()) {
GenMemBarrier(kLoadLoad);
}
StoreValue(rl_dest, rl_result);
@@ -748,7 +734,7 @@ void Mir2Lir::GenIGet(uint32_t field_idx, int opt_flags, OpSize size,
is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Instance)
: (is_object ? QUICK_ENTRYPOINT_OFFSET(pGetObjInstance)
: QUICK_ENTRYPOINT_OFFSET(pGet32Instance));
- CallRuntimeHelperImmRegLocation(getterOffset, field_idx, rl_obj, true);
+ CallRuntimeHelperImmRegLocation(getterOffset, field_info.FieldIndex(), rl_obj, true);
if (is_long_or_double) {
RegLocation rl_result = GetReturnWide(rl_dest.fp);
StoreValueWide(rl_dest, rl_result);
@@ -759,43 +745,42 @@ void Mir2Lir::GenIGet(uint32_t field_idx, int opt_flags, OpSize size,
}
}
-void Mir2Lir::GenIPut(uint32_t field_idx, int opt_flags, OpSize size,
+void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size,
RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double,
bool is_object) {
- int field_offset;
- bool is_volatile;
-
- bool fast_path = FastInstance(field_idx, true, &field_offset, &is_volatile);
- if (fast_path && !SLOW_FIELD_PATH) {
+ const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
+ cu_->compiler_driver->ProcessedInstanceField(field_info.FastPut());
+ if (field_info.FastPut() && !SLOW_FIELD_PATH) {
RegisterClass reg_class = oat_reg_class_by_size(size);
- DCHECK_GE(field_offset, 0);
+ DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
rl_obj = LoadValue(rl_obj, kCoreReg);
if (is_long_or_double) {
int reg_ptr;
rl_src = LoadValueWide(rl_src, kAnyReg);
- GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
+ GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), opt_flags);
reg_ptr = AllocTemp();
- OpRegRegImm(kOpAdd, reg_ptr, rl_obj.low_reg, field_offset);
- if (is_volatile) {
+ OpRegRegImm(kOpAdd, reg_ptr, rl_obj.reg.GetReg(), field_info.FieldOffset().Int32Value());
+ if (field_info.IsVolatile()) {
GenMemBarrier(kStoreStore);
}
- StoreBaseDispWide(reg_ptr, 0, rl_src.low_reg, rl_src.high_reg);
- if (is_volatile) {
+ StoreBaseDispWide(reg_ptr, 0, rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
+ if (field_info.IsVolatile()) {
GenMemBarrier(kLoadLoad);
}
FreeTemp(reg_ptr);
} else {
rl_src = LoadValue(rl_src, reg_class);
- GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
- if (is_volatile) {
+ GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), opt_flags);
+ if (field_info.IsVolatile()) {
GenMemBarrier(kStoreStore);
}
- StoreBaseDisp(rl_obj.low_reg, field_offset, rl_src.low_reg, kWord);
- if (is_volatile) {
+ StoreBaseDisp(rl_obj.reg.GetReg(), field_info.FieldOffset().Int32Value(),
+ rl_src.reg.GetReg(), kWord);
+ if (field_info.IsVolatile()) {
GenMemBarrier(kLoadLoad);
}
if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
- MarkGCCard(rl_src.low_reg, rl_obj.low_reg);
+ MarkGCCard(rl_src.reg.GetReg(), rl_obj.reg.GetReg());
}
}
} else {
@@ -803,7 +788,8 @@ void Mir2Lir::GenIPut(uint32_t field_idx, int opt_flags, OpSize size,
is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Instance)
: (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjInstance)
: QUICK_ENTRYPOINT_OFFSET(pSet32Instance));
- CallRuntimeHelperImmRegLocationRegLocation(setter_offset, field_idx, rl_obj, rl_src, true);
+ CallRuntimeHelperImmRegLocationRegLocation(setter_offset, field_info.FieldIndex(),
+ rl_obj, rl_src, true);
}
}
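The fast path above ends with MarkGCCard for reference stores, the write barrier that records which objects gained outgoing pointers. A sketch of conventional card marking follows; the shift and table layout are illustrative assumptions, not ART's actual card table:

    #include <cstddef>
    #include <cstdint>

    static const int kCardShiftSketch = 10;  // assume 1 KiB cards

    // Dirty the card covering the object that now holds a reference so the
    // GC re-scans it. The diff skips the barrier entirely when the stored
    // value is provably null (IsConstantNullRef).
    static void MarkGCCardSketch(uint8_t* card_table, const void* holder) {
      uintptr_t addr = reinterpret_cast<uintptr_t>(holder);
      card_table[addr >> kCardShiftSketch] = 1;  // "dirty" marker
    }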
@@ -829,23 +815,23 @@ void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) {
// Call out to helper which resolves type and verifies access.
// Resolved type returned in kRet0.
CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccess),
- type_idx, rl_method.low_reg, true);
+ type_idx, rl_method.reg.GetReg(), true);
RegLocation rl_result = GetReturn(false);
StoreValue(rl_dest, rl_result);
} else {
// We don't need access checks; load the type from the dex cache
int32_t dex_cache_offset =
mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value();
- LoadWordDisp(rl_method.low_reg, dex_cache_offset, res_reg);
+ LoadWordDisp(rl_method.reg.GetReg(), dex_cache_offset, res_reg);
int32_t offset_of_type =
mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*)
* type_idx);
- LoadWordDisp(res_reg, offset_of_type, rl_result.low_reg);
+ LoadWordDisp(res_reg, offset_of_type, rl_result.reg.GetReg());
if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file,
type_idx) || SLOW_TYPE_PATH) {
// Slow path, at runtime test if type is null and if so initialize
FlushAllRegs();
- LIR* branch = OpCmpImmBranch(kCondEq, rl_result.low_reg, 0, NULL);
+ LIR* branch = OpCmpImmBranch(kCondEq, rl_result.reg.GetReg(), 0, NULL);
LIR* cont = NewLIR0(kPseudoTargetLabel);
// Object to generate the slow path for class resolution.
@@ -861,8 +847,8 @@ void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) {
GenerateTargetLabel();
m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeType), type_idx_,
- rl_method_.low_reg, true);
- m2l_->OpRegCopy(rl_result_.low_reg, m2l_->TargetReg(kRet0));
+ rl_method_.reg.GetReg(), true);
+ m2l_->OpRegCopy(rl_result_.reg.GetReg(), m2l_->TargetReg(kRet0));
m2l_->OpUnconditionalBranch(cont_);
}
@@ -900,8 +886,8 @@ void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) {
int r_method;
if (rl_method.location == kLocPhysReg) {
// A temp would conflict with register use below.
- DCHECK(!IsTemp(rl_method.low_reg));
- r_method = rl_method.low_reg;
+ DCHECK(!IsTemp(rl_method.reg.GetReg()));
+ r_method = rl_method.reg.GetReg();
} else {
r_method = TargetReg(kArg2);
LoadCurrMethodDirect(r_method);
@@ -960,9 +946,9 @@ void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) {
RegLocation rl_method = LoadCurrMethod();
int res_reg = AllocTemp();
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- LoadWordDisp(rl_method.low_reg,
+ LoadWordDisp(rl_method.reg.GetReg(),
mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), res_reg);
- LoadWordDisp(res_reg, offset_of_string, rl_result.low_reg);
+ LoadWordDisp(res_reg, offset_of_string, rl_result.reg.GetReg());
StoreValue(rl_dest, rl_result);
}
}
@@ -1035,12 +1021,12 @@ void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, Re
RegLocation object = LoadValue(rl_src, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- int result_reg = rl_result.low_reg;
- if (result_reg == object.low_reg) {
+ int result_reg = rl_result.reg.GetReg();
+ if (result_reg == object.reg.GetReg()) {
result_reg = AllocTypedTemp(false, kCoreReg);
}
LoadConstant(result_reg, 0); // assume false
- LIR* null_branchover = OpCmpImmBranch(kCondEq, object.low_reg, 0, NULL);
+ LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg.GetReg(), 0, NULL);
int check_class = AllocTypedTemp(false, kCoreReg);
int object_class = AllocTypedTemp(false, kCoreReg);
@@ -1049,11 +1035,11 @@ void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, Re
if (use_declaring_class) {
LoadWordDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
check_class);
- LoadWordDisp(object.low_reg, mirror::Object::ClassOffset().Int32Value(), object_class);
+ LoadWordDisp(object.reg.GetReg(), mirror::Object::ClassOffset().Int32Value(), object_class);
} else {
LoadWordDisp(check_class, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
check_class);
- LoadWordDisp(object.low_reg, mirror::Object::ClassOffset().Int32Value(), object_class);
+ LoadWordDisp(object.reg.GetReg(), mirror::Object::ClassOffset().Int32Value(), object_class);
int32_t offset_of_type =
mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
(sizeof(mirror::Class*) * type_idx);
@@ -1077,7 +1063,7 @@ void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, Re
FreeTemp(object_class);
FreeTemp(check_class);
if (IsTemp(result_reg)) {
- OpRegCopy(rl_result.low_reg, result_reg);
+ OpRegCopy(rl_result.reg.GetReg(), result_reg);
FreeTemp(result_reg);
}
StoreValue(rl_dest, rl_result);
@@ -1133,7 +1119,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
RegLocation rl_result = GetReturn(false);
if (cu_->instruction_set == kMips) {
// On MIPS rArg0 != rl_result, place false in result if branch is taken.
- LoadConstant(rl_result.low_reg, 0);
+ LoadConstant(rl_result.reg.GetReg(), 0);
}
LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
@@ -1147,12 +1133,12 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
if (cu_->instruction_set == kThumb2) {
OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2)); // Same?
OpIT(kCondEq, "E"); // if-convert the test
- LoadConstant(rl_result.low_reg, 1); // .eq case - load true
- LoadConstant(rl_result.low_reg, 0); // .ne case - load false
+ LoadConstant(rl_result.reg.GetReg(), 1); // .eq case - load true
+ LoadConstant(rl_result.reg.GetReg(), 0); // .ne case - load false
} else {
- LoadConstant(rl_result.low_reg, 0); // ne case - load false
+ LoadConstant(rl_result.reg.GetReg(), 0); // ne case - load false
branchover = OpCmpBranch(kCondNe, TargetReg(kArg1), TargetReg(kArg2), NULL);
- LoadConstant(rl_result.low_reg, 1); // eq case - load true
+ LoadConstant(rl_result.reg.GetReg(), 1); // eq case - load true
}
} else {
if (cu_->instruction_set == kThumb2) {
@@ -1169,7 +1155,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
} else {
if (!type_known_abstract) {
/* Uses branchovers */
- LoadConstant(rl_result.low_reg, 1); // assume true
+ LoadConstant(rl_result.reg.GetReg(), 1); // assume true
branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
}
int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivial));
@@ -1355,16 +1341,16 @@ void Mir2Lir::GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_des
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
// The longs may overlap - use intermediate temp if so
- if ((rl_result.low_reg == rl_src1.high_reg) || (rl_result.low_reg == rl_src2.high_reg)) {
+ if ((rl_result.reg.GetReg() == rl_src1.reg.GetHighReg()) || (rl_result.reg.GetReg() == rl_src2.reg.GetHighReg())) {
int t_reg = AllocTemp();
- OpRegRegReg(first_op, t_reg, rl_src1.low_reg, rl_src2.low_reg);
- OpRegRegReg(second_op, rl_result.high_reg, rl_src1.high_reg, rl_src2.high_reg);
- OpRegCopy(rl_result.low_reg, t_reg);
+ OpRegRegReg(first_op, t_reg, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+ OpRegRegReg(second_op, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
+ OpRegCopy(rl_result.reg.GetReg(), t_reg);
FreeTemp(t_reg);
} else {
- OpRegRegReg(first_op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
- OpRegRegReg(second_op, rl_result.high_reg, rl_src1.high_reg,
- rl_src2.high_reg);
+ OpRegRegReg(first_op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+ OpRegRegReg(second_op, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(),
+ rl_src2.reg.GetHighReg());
}
/*
* NOTE: If rl_dest refers to a frame variable in a large frame, the
@@ -1487,22 +1473,22 @@ void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
if (unary) {
rl_src1 = LoadValue(rl_src1, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegReg(op, rl_result.low_reg, rl_src1.low_reg);
+ OpRegReg(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg());
} else {
if (shift_op) {
int t_reg = INVALID_REG;
rl_src2 = LoadValue(rl_src2, kCoreReg);
t_reg = AllocTemp();
- OpRegRegImm(kOpAnd, t_reg, rl_src2.low_reg, 31);
+ OpRegRegImm(kOpAnd, t_reg, rl_src2.reg.GetReg(), 31);
rl_src1 = LoadValue(rl_src1, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegReg(op, rl_result.low_reg, rl_src1.low_reg, t_reg);
+ OpRegRegReg(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), t_reg);
FreeTemp(t_reg);
} else {
rl_src1 = LoadValue(rl_src1, kCoreReg);
rl_src2 = LoadValue(rl_src2, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegReg(op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
+ OpRegRegReg(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
}
}
StoreValue(rl_dest, rl_result);
@@ -1512,9 +1498,9 @@ void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
rl_src1 = LoadValue(rl_src1, kCoreReg);
rl_src2 = LoadValue(rl_src2, kCoreReg);
if (check_zero) {
- GenImmedCheck(kCondEq, rl_src2.low_reg, 0, kThrowDivZero);
+ GenImmedCheck(kCondEq, rl_src2.reg.GetReg(), 0, kThrowDivZero);
}
- rl_result = GenDivRem(rl_dest, rl_src1.low_reg, rl_src2.low_reg, op == kOpDiv);
+ rl_result = GenDivRem(rl_dest, rl_src1.reg.GetReg(), rl_src2.reg.GetReg(), op == kOpDiv);
done = true;
} else if (cu_->instruction_set == kThumb2) {
if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) {
@@ -1523,9 +1509,9 @@ void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
rl_src1 = LoadValue(rl_src1, kCoreReg);
rl_src2 = LoadValue(rl_src2, kCoreReg);
if (check_zero) {
- GenImmedCheck(kCondEq, rl_src2.low_reg, 0, kThrowDivZero);
+ GenImmedCheck(kCondEq, rl_src2.reg.GetReg(), 0, kThrowDivZero);
}
- rl_result = GenDivRem(rl_dest, rl_src1.low_reg, rl_src2.low_reg, op == kOpDiv);
+ rl_result = GenDivRem(rl_dest, rl_src1.reg.GetReg(), rl_src2.reg.GetReg(), op == kOpDiv);
done = true;
}
}
@@ -1585,29 +1571,29 @@ bool Mir2Lir::HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
int t_reg = AllocTemp();
if (lit == 2) {
// Division by 2 is by far the most common division by constant.
- OpRegRegImm(kOpLsr, t_reg, rl_src.low_reg, 32 - k);
- OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.low_reg);
- OpRegRegImm(kOpAsr, rl_result.low_reg, t_reg, k);
+ OpRegRegImm(kOpLsr, t_reg, rl_src.reg.GetReg(), 32 - k);
+ OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg.GetReg());
+ OpRegRegImm(kOpAsr, rl_result.reg.GetReg(), t_reg, k);
} else {
- OpRegRegImm(kOpAsr, t_reg, rl_src.low_reg, 31);
+ OpRegRegImm(kOpAsr, t_reg, rl_src.reg.GetReg(), 31);
OpRegRegImm(kOpLsr, t_reg, t_reg, 32 - k);
- OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.low_reg);
- OpRegRegImm(kOpAsr, rl_result.low_reg, t_reg, k);
+ OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg.GetReg());
+ OpRegRegImm(kOpAsr, rl_result.reg.GetReg(), t_reg, k);
}
} else {
int t_reg1 = AllocTemp();
int t_reg2 = AllocTemp();
if (lit == 2) {
- OpRegRegImm(kOpLsr, t_reg1, rl_src.low_reg, 32 - k);
- OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.low_reg);
+ OpRegRegImm(kOpLsr, t_reg1, rl_src.reg.GetReg(), 32 - k);
+ OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg.GetReg());
OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
- OpRegRegReg(kOpSub, rl_result.low_reg, t_reg2, t_reg1);
+ OpRegRegReg(kOpSub, rl_result.reg.GetReg(), t_reg2, t_reg1);
} else {
- OpRegRegImm(kOpAsr, t_reg1, rl_src.low_reg, 31);
+ OpRegRegImm(kOpAsr, t_reg1, rl_src.reg.GetReg(), 31);
OpRegRegImm(kOpLsr, t_reg1, t_reg1, 32 - k);
- OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.low_reg);
+ OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg.GetReg());
OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
- OpRegRegReg(kOpSub, rl_result.low_reg, t_reg2, t_reg1);
+ OpRegRegReg(kOpSub, rl_result.reg.GetReg(), t_reg2, t_reg1);
}
}
StoreValue(rl_dest, rl_result);
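The shifts above implement round-toward-zero signed division by 2^k: an unsigned shift extracts a bias of 2^k - 1 from the sign word, the bias is added to the dividend, and an arithmetic shift then divides. A runnable C++ restatement, assuming arithmetic right shift of signed values as the emitted ARM code provides:

    #include <cassert>
    #include <cstdint>

    static int32_t DivPow2(int32_t x, int k) {
      int32_t sign = x >> 31;  // 0 for non-negative, -1 for negative
      // Unsigned shift turns the sign word into (2^k - 1) or 0, matching the
      // kOpAsr/kOpLsr pair in the general path above.
      int32_t bias = static_cast<int32_t>(static_cast<uint32_t>(sign) >> (32 - k));
      return (x + bias) >> k;  // biased arithmetic shift rounds toward zero
    }

    int main() {
      assert(DivPow2(7, 2) == 1);
      assert(DivPow2(-7, 2) == -1);  // a plain -7 >> 2 would give -2
      return 0;
    }

The remainder path applies the same bias but masks with (lit - 1) and subtracts it back out, which is the t_reg1/t_reg2 sequence in the second branch.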
@@ -1637,7 +1623,7 @@ bool Mir2Lir::HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int li
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (power_of_two) {
// Shift.
- OpRegRegImm(kOpLsl, rl_result.low_reg, rl_src.low_reg, LowestSetBit(lit));
+ OpRegRegImm(kOpLsl, rl_result.reg.GetReg(), rl_src.reg.GetReg(), LowestSetBit(lit));
} else if (pop_count_le2) {
// Shift and add and shift.
int first_bit = LowestSetBit(lit);
@@ -1648,8 +1634,8 @@ bool Mir2Lir::HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int li
DCHECK(power_of_two_minus_one);
// TUNING: rsb dst, src, src lsl#LowestSetBit(lit + 1)
int t_reg = AllocTemp();
- OpRegRegImm(kOpLsl, t_reg, rl_src.low_reg, LowestSetBit(lit + 1));
- OpRegRegReg(kOpSub, rl_result.low_reg, t_reg, rl_src.low_reg);
+ OpRegRegImm(kOpLsl, t_reg, rl_src.reg.GetReg(), LowestSetBit(lit + 1));
+ OpRegRegReg(kOpSub, rl_result.reg.GetReg(), t_reg, rl_src.reg.GetReg());
}
StoreValue(rl_dest, rl_result);
return true;
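HandleEasyMultiply covers three literal shapes: a power of two (one shift), a constant with exactly two set bits (shift-add), and 2^k - 1 (the rsb pattern in the TUNING note). Restated in portable C++, using unsigned arithmetic internally to sidestep signed-shift pitfalls; the names are invented:

    #include <cassert>
    #include <cstdint>

    static int32_t MulPow2(int32_t x, int k) {  // lit == 2^k: one shift
      return static_cast<int32_t>(static_cast<uint32_t>(x) << k);
    }

    static int32_t MulTwoBits(int32_t x, int hi, int lo) {  // popcount(lit) == 2
      uint32_t ux = static_cast<uint32_t>(x);
      return static_cast<int32_t>((ux << hi) + (ux << lo));
    }

    static int32_t MulPow2Minus1(int32_t x, int k) {  // lit == 2^k - 1
      uint32_t ux = static_cast<uint32_t>(x);
      return static_cast<int32_t>((ux << k) - ux);  // src lsl #k, minus src
    }

    int main() {
      assert(MulPow2(3, 2) == 12);        // 3 * 4
      assert(MulTwoBits(3, 2, 0) == 15);  // 3 * 5 (binary 101)
      assert(MulPow2Minus1(3, 3) == 21);  // 3 * 7
      return 0;
    }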
@@ -1668,10 +1654,10 @@ void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, Re
rl_src = LoadValue(rl_src, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (cu_->instruction_set == kThumb2) {
- OpRegRegImm(kOpRsub, rl_result.low_reg, rl_src.low_reg, lit);
+ OpRegRegImm(kOpRsub, rl_result.reg.GetReg(), rl_src.reg.GetReg(), lit);
} else {
- OpRegReg(kOpNeg, rl_result.low_reg, rl_src.low_reg);
- OpRegImm(kOpAdd, rl_result.low_reg, lit);
+ OpRegReg(kOpNeg, rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ OpRegImm(kOpAdd, rl_result.reg.GetReg(), lit);
}
StoreValue(rl_dest, rl_result);
return;
@@ -1764,7 +1750,7 @@ void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, Re
bool done = false;
if (cu_->instruction_set == kMips) {
rl_src = LoadValue(rl_src, kCoreReg);
- rl_result = GenDivRemLit(rl_dest, rl_src.low_reg, lit, is_div);
+ rl_result = GenDivRemLit(rl_dest, rl_src.reg.GetReg(), lit, is_div);
done = true;
} else if (cu_->instruction_set == kX86) {
rl_result = GenDivRemLit(rl_dest, rl_src, lit, is_div);
@@ -1774,7 +1760,7 @@ void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, Re
// Use ARM SDIV instruction for division. For remainder we also need to
// calculate using a MUL and subtract.
rl_src = LoadValue(rl_src, kCoreReg);
- rl_result = GenDivRemLit(rl_dest, rl_src.low_reg, lit, is_div);
+ rl_result = GenDivRemLit(rl_dest, rl_src.reg.GetReg(), lit, is_div);
done = true;
}
}
@@ -1800,9 +1786,9 @@ void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, Re
rl_result = EvalLoc(rl_dest, kCoreReg, true);
// Avoid shifts by literal 0 - no support in Thumb. Change to copy.
if (shift_op && (lit == 0)) {
- OpRegCopy(rl_result.low_reg, rl_src.low_reg);
+ OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetReg());
} else {
- OpRegRegImm(op, rl_result.low_reg, rl_src.low_reg, lit);
+ OpRegRegImm(op, rl_result.reg.GetReg(), rl_src.reg.GetReg(), lit);
}
StoreValue(rl_dest, rl_result);
}
@@ -1822,15 +1808,15 @@ void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
// Check for destructive overlap
- if (rl_result.low_reg == rl_src2.high_reg) {
+ if (rl_result.reg.GetReg() == rl_src2.reg.GetHighReg()) {
int t_reg = AllocTemp();
- OpRegCopy(t_reg, rl_src2.high_reg);
- OpRegReg(kOpMvn, rl_result.low_reg, rl_src2.low_reg);
- OpRegReg(kOpMvn, rl_result.high_reg, t_reg);
+ OpRegCopy(t_reg, rl_src2.reg.GetHighReg());
+ OpRegReg(kOpMvn, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
+ OpRegReg(kOpMvn, rl_result.reg.GetHighReg(), t_reg);
FreeTemp(t_reg);
} else {
- OpRegReg(kOpMvn, rl_result.low_reg, rl_src2.low_reg);
- OpRegReg(kOpMvn, rl_result.high_reg, rl_src2.high_reg);
+ OpRegReg(kOpMvn, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
+ OpRegReg(kOpMvn, rl_result.reg.GetHighReg(), rl_src2.reg.GetHighReg());
}
StoreValueWide(rl_dest, rl_result);
return;
@@ -2003,7 +1989,7 @@ void Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
/* Generic code for generating a wide constant into a VR. */
void Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) {
RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
- LoadConstantWide(rl_result.low_reg, rl_result.high_reg, value);
+ LoadConstantWide(rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), value);
StoreValueWide(rl_dest, rl_result);
}
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 35d193c65c..dd3d466d94 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -284,9 +284,9 @@ void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
*/
RegLocation rl_src = rl_method;
rl_src.location = kLocPhysReg;
- rl_src.low_reg = TargetReg(kArg0);
+ rl_src.reg = RegStorage(RegStorage::k32BitSolo, TargetReg(kArg0));
rl_src.home = false;
- MarkLive(rl_src.low_reg, rl_src.s_reg_low);
+ MarkLive(rl_src.reg.GetReg(), rl_src.s_reg_low);
StoreValue(rl_method, rl_src);
// If Method* has been promoted, explicitly flush
if (rl_method.location == kLocPhysReg) {
@@ -680,7 +680,7 @@ int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
// Wide spans, we need the 2nd half of uses[2].
rl_arg = UpdateLocWide(rl_use2);
if (rl_arg.location == kLocPhysReg) {
- reg = rl_arg.high_reg;
+ reg = rl_arg.reg.GetHighReg();
} else {
// kArg2 & rArg3 can safely be used here
reg = TargetReg(kArg3);
@@ -701,8 +701,10 @@ int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
rl_arg = info->args[next_use];
rl_arg = UpdateRawLoc(rl_arg);
if (rl_arg.location == kLocPhysReg) {
- low_reg = rl_arg.low_reg;
- high_reg = rl_arg.high_reg;
+ low_reg = rl_arg.reg.GetReg();
+ if (rl_arg.wide) {
+ high_reg = rl_arg.reg.GetHighReg();
+ }
} else {
low_reg = TargetReg(kArg2);
if (rl_arg.wide) {
@@ -775,14 +777,14 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
loc = UpdateLocWide(loc);
if ((next_arg >= 2) && (loc.location == kLocPhysReg)) {
StoreBaseDispWide(TargetReg(kSp), SRegOffset(loc.s_reg_low),
- loc.low_reg, loc.high_reg);
+ loc.reg.GetReg(), loc.reg.GetHighReg());
}
next_arg += 2;
} else {
loc = UpdateLoc(loc);
if ((next_arg >= 3) && (loc.location == kLocPhysReg)) {
StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low),
- loc.low_reg, kWord);
+ loc.reg.GetReg(), kWord);
}
next_arg++;
}
@@ -983,7 +985,7 @@ bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
rl_idx = LoadValue(rl_idx, kCoreReg);
}
int reg_max;
- GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, info->opt_flags);
+ GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), info->opt_flags);
bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
LIR* launch_pad = NULL;
int reg_off = INVALID_REG;
@@ -993,15 +995,15 @@ bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
reg_ptr = AllocTemp();
if (range_check) {
reg_max = AllocTemp();
- LoadWordDisp(rl_obj.low_reg, count_offset, reg_max);
+ LoadWordDisp(rl_obj.reg.GetReg(), count_offset, reg_max);
}
- LoadWordDisp(rl_obj.low_reg, offset_offset, reg_off);
- LoadWordDisp(rl_obj.low_reg, value_offset, reg_ptr);
+ LoadWordDisp(rl_obj.reg.GetReg(), offset_offset, reg_off);
+ LoadWordDisp(rl_obj.reg.GetReg(), value_offset, reg_ptr);
if (range_check) {
// Set up a launch pad to allow retry in case of bounds violation.
launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
intrinsic_launchpads_.Insert(launch_pad);
- OpRegReg(kOpCmp, rl_idx.low_reg, reg_max);
+ OpRegReg(kOpCmp, rl_idx.reg.GetReg(), reg_max);
FreeTemp(reg_max);
OpCondBranch(kCondUge, launch_pad);
}
@@ -1013,33 +1015,33 @@ bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
intrinsic_launchpads_.Insert(launch_pad);
if (rl_idx.is_const) {
- OpCmpMemImmBranch(kCondUlt, INVALID_REG, rl_obj.low_reg, count_offset,
+ OpCmpMemImmBranch(kCondUlt, INVALID_REG, rl_obj.reg.GetReg(), count_offset,
mir_graph_->ConstantValue(rl_idx.orig_sreg), launch_pad);
} else {
- OpRegMem(kOpCmp, rl_idx.low_reg, rl_obj.low_reg, count_offset);
+ OpRegMem(kOpCmp, rl_idx.reg.GetReg(), rl_obj.reg.GetReg(), count_offset);
OpCondBranch(kCondUge, launch_pad);
}
}
reg_off = AllocTemp();
reg_ptr = AllocTemp();
- LoadWordDisp(rl_obj.low_reg, offset_offset, reg_off);
- LoadWordDisp(rl_obj.low_reg, value_offset, reg_ptr);
+ LoadWordDisp(rl_obj.reg.GetReg(), offset_offset, reg_off);
+ LoadWordDisp(rl_obj.reg.GetReg(), value_offset, reg_ptr);
}
if (rl_idx.is_const) {
OpRegImm(kOpAdd, reg_off, mir_graph_->ConstantValue(rl_idx.orig_sreg));
} else {
- OpRegReg(kOpAdd, reg_off, rl_idx.low_reg);
+ OpRegReg(kOpAdd, reg_off, rl_idx.reg.GetReg());
}
- FreeTemp(rl_obj.low_reg);
- if (rl_idx.low_reg != INVALID_REG) {
- FreeTemp(rl_idx.low_reg);
+ FreeTemp(rl_obj.reg.GetReg());
+ if (rl_idx.location == kLocPhysReg) {
+ FreeTemp(rl_idx.reg.GetReg());
}
RegLocation rl_dest = InlineTarget(info);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (cu_->instruction_set != kX86) {
- LoadBaseIndexed(reg_ptr, reg_off, rl_result.low_reg, 1, kUnsignedHalf);
+ LoadBaseIndexed(reg_ptr, reg_off, rl_result.reg.GetReg(), 1, kUnsignedHalf);
} else {
- LoadBaseIndexedDisp(reg_ptr, reg_off, 1, data_offset, rl_result.low_reg,
+ LoadBaseIndexedDisp(reg_ptr, reg_off, 1, data_offset, rl_result.reg.GetReg(),
INVALID_REG, kUnsignedHalf, INVALID_SREG);
}
FreeTemp(reg_off);
@@ -1064,18 +1066,18 @@ bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty) {
rl_obj = LoadValue(rl_obj, kCoreReg);
RegLocation rl_dest = InlineTarget(info);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, info->opt_flags);
- LoadWordDisp(rl_obj.low_reg, mirror::String::CountOffset().Int32Value(), rl_result.low_reg);
+ GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), info->opt_flags);
+ LoadWordDisp(rl_obj.reg.GetReg(), mirror::String::CountOffset().Int32Value(), rl_result.reg.GetReg());
if (is_empty) {
// dst = (dst == 0);
if (cu_->instruction_set == kThumb2) {
int t_reg = AllocTemp();
- OpRegReg(kOpNeg, t_reg, rl_result.low_reg);
- OpRegRegReg(kOpAdc, rl_result.low_reg, rl_result.low_reg, t_reg);
+ OpRegReg(kOpNeg, t_reg, rl_result.reg.GetReg());
+ OpRegRegReg(kOpAdc, rl_result.reg.GetReg(), rl_result.reg.GetReg(), t_reg);
} else {
DCHECK_EQ(cu_->instruction_set, kX86);
- OpRegImm(kOpSub, rl_result.low_reg, 1);
- OpRegImm(kOpLsr, rl_result.low_reg, 31);
+ OpRegImm(kOpSub, rl_result.reg.GetReg(), 1);
+ OpRegImm(kOpLsr, rl_result.reg.GetReg(), 31);
}
}
StoreValue(rl_dest, rl_result);
@@ -1092,15 +1094,15 @@ bool Mir2Lir::GenInlinedReverseBytes(CallInfo* info, OpSize size) {
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (size == kLong) {
RegLocation rl_i = LoadValueWide(rl_src_i, kCoreReg);
- int r_i_low = rl_i.low_reg;
- if (rl_i.low_reg == rl_result.low_reg) {
- // First REV shall clobber rl_result.low_reg, save the value in a temp for the second REV.
+ int r_i_low = rl_i.reg.GetReg();
+ if (rl_i.reg.GetReg() == rl_result.reg.GetReg()) {
+      // First REV would clobber rl_result.reg.GetReg(), so save the value in a temp for the second REV.
r_i_low = AllocTemp();
- OpRegCopy(r_i_low, rl_i.low_reg);
+ OpRegCopy(r_i_low, rl_i.reg.GetReg());
}
- OpRegReg(kOpRev, rl_result.low_reg, rl_i.high_reg);
- OpRegReg(kOpRev, rl_result.high_reg, r_i_low);
- if (rl_i.low_reg == rl_result.low_reg) {
+ OpRegReg(kOpRev, rl_result.reg.GetReg(), rl_i.reg.GetHighReg());
+ OpRegReg(kOpRev, rl_result.reg.GetHighReg(), r_i_low);
+ if (rl_i.reg.GetReg() == rl_result.reg.GetReg()) {
FreeTemp(r_i_low);
}
StoreValueWide(rl_dest, rl_result);
@@ -1108,7 +1110,7 @@ bool Mir2Lir::GenInlinedReverseBytes(CallInfo* info, OpSize size) {
DCHECK(size == kWord || size == kSignedHalf);
OpKind op = (size == kWord) ? kOpRev : kOpRevsh;
RegLocation rl_i = LoadValue(rl_src_i, kCoreReg);
- OpRegReg(op, rl_result.low_reg, rl_i.low_reg);
+ OpRegReg(op, rl_result.reg.GetReg(), rl_i.reg.GetReg());
StoreValue(rl_dest, rl_result);
}
return true;
@@ -1125,9 +1127,9 @@ bool Mir2Lir::GenInlinedAbsInt(CallInfo* info) {
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
int sign_reg = AllocTemp();
// abs(x) = y<=x>>31, (x+y)^y.
- OpRegRegImm(kOpAsr, sign_reg, rl_src.low_reg, 31);
- OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, sign_reg);
- OpRegReg(kOpXor, rl_result.low_reg, sign_reg);
+ OpRegRegImm(kOpAsr, sign_reg, rl_src.reg.GetReg(), 31);
+ OpRegRegReg(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), sign_reg);
+ OpRegReg(kOpXor, rl_result.reg.GetReg(), sign_reg);
StoreValue(rl_dest, rl_result);
return true;
}
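The terse "abs(x) = y<=x>>31, (x+y)^y" comment reads as: let y be the arithmetic-shift sign mask of x; adding y then xoring with y is a no-op when x is non-negative (y == 0) and a two's-complement negation when x is negative (subtract 1, then flip all bits). A runnable check, again assuming arithmetic right shift as on the ARM target:

    #include <cassert>
    #include <cstdint>

    static int32_t AbsSketch(int32_t x) {
      int32_t y = x >> 31;  // 0 or -1
      return (x + y) ^ y;
    }

    int main() {
      assert(AbsSketch(-5) == 5);
      assert(AbsSketch(5) == 5);
      assert(AbsSketch(0) == 0);
      return 0;
    }

The wide variant below applies the same mask to both halves, using add-with-carry (kOpAdc) so the +y step propagates across the 64-bit value.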
@@ -1144,11 +1146,11 @@ bool Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
int sign_reg = AllocTemp();
// abs(x) = y<=x>>31, (x+y)^y.
- OpRegRegImm(kOpAsr, sign_reg, rl_src.high_reg, 31);
- OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, sign_reg);
- OpRegRegReg(kOpAdc, rl_result.high_reg, rl_src.high_reg, sign_reg);
- OpRegReg(kOpXor, rl_result.low_reg, sign_reg);
- OpRegReg(kOpXor, rl_result.high_reg, sign_reg);
+ OpRegRegImm(kOpAsr, sign_reg, rl_src.reg.GetHighReg(), 31);
+ OpRegRegReg(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), sign_reg);
+ OpRegRegReg(kOpAdc, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), sign_reg);
+ OpRegReg(kOpXor, rl_result.reg.GetReg(), sign_reg);
+ OpRegReg(kOpXor, rl_result.reg.GetHighReg(), sign_reg);
StoreValueWide(rl_dest, rl_result);
return true;
} else {
@@ -1158,16 +1160,16 @@ bool Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
rl_src = LoadValueWide(rl_src, kCoreReg);
RegLocation rl_dest = InlineTargetWide(info);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegCopyWide(rl_result.low_reg, rl_result.high_reg, rl_src.low_reg, rl_src.high_reg);
- FreeTemp(rl_src.low_reg);
- FreeTemp(rl_src.high_reg);
+ OpRegCopyWide(rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
+ FreeTemp(rl_src.reg.GetReg());
+ FreeTemp(rl_src.reg.GetHighReg());
int sign_reg = AllocTemp();
// abs(x) = y<=x>>31, (x+y)^y.
- OpRegRegImm(kOpAsr, sign_reg, rl_result.high_reg, 31);
- OpRegReg(kOpAdd, rl_result.low_reg, sign_reg);
- OpRegReg(kOpAdc, rl_result.high_reg, sign_reg);
- OpRegReg(kOpXor, rl_result.low_reg, sign_reg);
- OpRegReg(kOpXor, rl_result.high_reg, sign_reg);
+ OpRegRegImm(kOpAsr, sign_reg, rl_result.reg.GetHighReg(), 31);
+ OpRegReg(kOpAdd, rl_result.reg.GetReg(), sign_reg);
+ OpRegReg(kOpAdc, rl_result.reg.GetHighReg(), sign_reg);
+ OpRegReg(kOpXor, rl_result.reg.GetReg(), sign_reg);
+ OpRegReg(kOpXor, rl_result.reg.GetHighReg(), sign_reg);
StoreValueWide(rl_dest, rl_result);
return true;
}
@@ -1184,7 +1186,7 @@ bool Mir2Lir::GenInlinedAbsFloat(CallInfo* info) {
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
int signMask = AllocTemp();
LoadConstant(signMask, 0x7fffffff);
- OpRegRegReg(kOpAnd, rl_result.low_reg, rl_src.low_reg, signMask);
+ OpRegRegReg(kOpAnd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), signMask);
FreeTemp(signMask);
StoreValue(rl_dest, rl_result);
return true;
@@ -1199,12 +1201,12 @@ bool Mir2Lir::GenInlinedAbsDouble(CallInfo* info) {
rl_src = LoadValueWide(rl_src, kCoreReg);
RegLocation rl_dest = InlineTargetWide(info);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegCopyWide(rl_result.low_reg, rl_result.high_reg, rl_src.low_reg, rl_src.high_reg);
- FreeTemp(rl_src.low_reg);
- FreeTemp(rl_src.high_reg);
+ OpRegCopyWide(rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
+ FreeTemp(rl_src.reg.GetReg());
+ FreeTemp(rl_src.reg.GetHighReg());
int signMask = AllocTemp();
LoadConstant(signMask, 0x7fffffff);
- OpRegReg(kOpAnd, rl_result.high_reg, signMask);
+ OpRegReg(kOpAnd, rl_result.reg.GetHighReg(), signMask);
FreeTemp(signMask);
StoreValueWide(rl_dest, rl_result);
return true;
@@ -1316,10 +1318,10 @@ bool Mir2Lir::GenInlinedCurrentThread(CallInfo* info) {
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
ThreadOffset offset = Thread::PeerOffset();
if (cu_->instruction_set == kThumb2 || cu_->instruction_set == kMips) {
- LoadWordDisp(TargetReg(kSelf), offset.Int32Value(), rl_result.low_reg);
+ LoadWordDisp(TargetReg(kSelf), offset.Int32Value(), rl_result.reg.GetReg());
} else {
CHECK(cu_->instruction_set == kX86);
- reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.low_reg, offset);
+ reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.reg.GetReg(), offset);
}
StoreValue(rl_dest, rl_result);
return true;
@@ -1343,11 +1345,11 @@ bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info,
RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (is_long) {
- OpRegReg(kOpAdd, rl_object.low_reg, rl_offset.low_reg);
- LoadBaseDispWide(rl_object.low_reg, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
+ OpRegReg(kOpAdd, rl_object.reg.GetReg(), rl_offset.reg.GetReg());
+ LoadBaseDispWide(rl_object.reg.GetReg(), 0, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), INVALID_SREG);
StoreValueWide(rl_dest, rl_result);
} else {
- LoadBaseIndexed(rl_object.low_reg, rl_offset.low_reg, rl_result.low_reg, 0, kWord);
+ LoadBaseIndexed(rl_object.reg.GetReg(), rl_offset.reg.GetReg(), rl_result.reg.GetReg(), 0, kWord);
StoreValue(rl_dest, rl_result);
}
return true;
@@ -1372,20 +1374,20 @@ bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
RegLocation rl_value;
if (is_long) {
rl_value = LoadValueWide(rl_src_value, kCoreReg);
- OpRegReg(kOpAdd, rl_object.low_reg, rl_offset.low_reg);
- StoreBaseDispWide(rl_object.low_reg, 0, rl_value.low_reg, rl_value.high_reg);
+ OpRegReg(kOpAdd, rl_object.reg.GetReg(), rl_offset.reg.GetReg());
+ StoreBaseDispWide(rl_object.reg.GetReg(), 0, rl_value.reg.GetReg(), rl_value.reg.GetHighReg());
} else {
rl_value = LoadValue(rl_src_value, kCoreReg);
- StoreBaseIndexed(rl_object.low_reg, rl_offset.low_reg, rl_value.low_reg, 0, kWord);
+ StoreBaseIndexed(rl_object.reg.GetReg(), rl_offset.reg.GetReg(), rl_value.reg.GetReg(), 0, kWord);
}
// Free up the temp early, to ensure x86 doesn't run out of temporaries in MarkGCCard.
- FreeTemp(rl_offset.low_reg);
+ FreeTemp(rl_offset.reg.GetReg());
if (is_volatile) {
GenMemBarrier(kStoreLoad);
}
if (is_object) {
- MarkGCCard(rl_value.low_reg, rl_object.low_reg);
+ MarkGCCard(rl_value.reg.GetReg(), rl_object.reg.GetReg());
}
return true;
}
diff --git a/compiler/dex/quick/gen_loadstore.cc b/compiler/dex/quick/gen_loadstore.cc
index f7c2821afd..3b79df99ce 100644
--- a/compiler/dex/quick/gen_loadstore.cc
+++ b/compiler/dex/quick/gen_loadstore.cc
@@ -92,7 +92,7 @@ LIR* Mir2Lir::StoreWordDisp(int rBase, int displacement, int r_src) {
void Mir2Lir::LoadValueDirect(RegLocation rl_src, int r_dest) {
rl_src = UpdateLoc(rl_src);
if (rl_src.location == kLocPhysReg) {
- OpRegCopy(r_dest, rl_src.low_reg);
+ OpRegCopy(r_dest, rl_src.reg.GetReg());
} else if (IsInexpensiveConstant(rl_src)) {
LoadConstantNoClobber(r_dest, mir_graph_->ConstantValue(rl_src));
} else {
@@ -122,7 +122,7 @@ void Mir2Lir::LoadValueDirectWide(RegLocation rl_src, int reg_lo,
int reg_hi) {
rl_src = UpdateLocWide(rl_src);
if (rl_src.location == kLocPhysReg) {
- OpRegCopyWide(reg_lo, reg_hi, rl_src.low_reg, rl_src.high_reg);
+ OpRegCopyWide(reg_lo, reg_hi, rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
} else if (IsInexpensiveConstant(rl_src)) {
LoadConstantWide(reg_lo, reg_hi, mir_graph_->ConstantValueWide(rl_src));
} else {
@@ -150,9 +150,9 @@ void Mir2Lir::LoadValueDirectWideFixed(RegLocation rl_src, int reg_lo,
RegLocation Mir2Lir::LoadValue(RegLocation rl_src, RegisterClass op_kind) {
rl_src = EvalLoc(rl_src, op_kind, false);
if (IsInexpensiveConstant(rl_src) || rl_src.location != kLocPhysReg) {
- LoadValueDirect(rl_src, rl_src.low_reg);
+ LoadValueDirect(rl_src, rl_src.reg.GetReg());
rl_src.location = kLocPhysReg;
- MarkLive(rl_src.low_reg, rl_src.s_reg_low);
+ MarkLive(rl_src.reg.GetReg(), rl_src.s_reg_low);
}
return rl_src;
}
@@ -175,34 +175,34 @@ void Mir2Lir::StoreValue(RegLocation rl_dest, RegLocation rl_src) {
rl_src = UpdateLoc(rl_src);
rl_dest = UpdateLoc(rl_dest);
if (rl_src.location == kLocPhysReg) {
- if (IsLive(rl_src.low_reg) ||
- IsPromoted(rl_src.low_reg) ||
+ if (IsLive(rl_src.reg.GetReg()) ||
+ IsPromoted(rl_src.reg.GetReg()) ||
(rl_dest.location == kLocPhysReg)) {
// Src is live/promoted or Dest has assigned reg.
rl_dest = EvalLoc(rl_dest, kAnyReg, false);
- OpRegCopy(rl_dest.low_reg, rl_src.low_reg);
+ OpRegCopy(rl_dest.reg.GetReg(), rl_src.reg.GetReg());
} else {
// Just re-assign the registers. Dest gets Src's regs
- rl_dest.low_reg = rl_src.low_reg;
- Clobber(rl_src.low_reg);
+ rl_dest.reg = rl_src.reg;
+ Clobber(rl_src.reg.GetReg());
}
} else {
// Load Src either into promoted Dest or temps allocated for Dest
rl_dest = EvalLoc(rl_dest, kAnyReg, false);
- LoadValueDirect(rl_src, rl_dest.low_reg);
+ LoadValueDirect(rl_src, rl_dest.reg.GetReg());
}
// Dest is now live and dirty (until/if we flush it to home location)
- MarkLive(rl_dest.low_reg, rl_dest.s_reg_low);
+ MarkLive(rl_dest.reg.GetReg(), rl_dest.s_reg_low);
MarkDirty(rl_dest);
ResetDefLoc(rl_dest);
- if (IsDirty(rl_dest.low_reg) &&
+ if (IsDirty(rl_dest.reg.GetReg()) &&
oat_live_out(rl_dest.s_reg_low)) {
def_start = last_lir_insn_;
StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low),
- rl_dest.low_reg, kWord);
+ rl_dest.reg.GetReg(), kWord);
MarkClean(rl_dest);
def_end = last_lir_insn_;
if (!rl_dest.ref) {
@@ -216,10 +216,10 @@ RegLocation Mir2Lir::LoadValueWide(RegLocation rl_src, RegisterClass op_kind) {
DCHECK(rl_src.wide);
rl_src = EvalLoc(rl_src, op_kind, false);
if (IsInexpensiveConstant(rl_src) || rl_src.location != kLocPhysReg) {
- LoadValueDirectWide(rl_src, rl_src.low_reg, rl_src.high_reg);
+ LoadValueDirectWide(rl_src, rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
rl_src.location = kLocPhysReg;
- MarkLive(rl_src.low_reg, rl_src.s_reg_low);
- MarkLive(rl_src.high_reg, GetSRegHi(rl_src.s_reg_low));
+ MarkLive(rl_src.reg.GetReg(), rl_src.s_reg_low);
+ MarkLive(rl_src.reg.GetHighReg(), GetSRegHi(rl_src.s_reg_low));
}
return rl_src;
}
@@ -237,57 +237,59 @@ void Mir2Lir::StoreValueWide(RegLocation rl_dest, RegLocation rl_src) {
}
LIR* def_start;
LIR* def_end;
- DCHECK_EQ(IsFpReg(rl_src.low_reg), IsFpReg(rl_src.high_reg));
+ DCHECK((rl_src.location != kLocPhysReg) ||
+ (IsFpReg(rl_src.reg.GetReg()) == IsFpReg(rl_src.reg.GetHighReg())));
DCHECK(rl_dest.wide);
DCHECK(rl_src.wide);
+ rl_src = UpdateLocWide(rl_src);
+ rl_dest = UpdateLocWide(rl_dest);
if (rl_src.location == kLocPhysReg) {
- if (IsLive(rl_src.low_reg) ||
- IsLive(rl_src.high_reg) ||
- IsPromoted(rl_src.low_reg) ||
- IsPromoted(rl_src.high_reg) ||
+ if (IsLive(rl_src.reg.GetReg()) ||
+ IsLive(rl_src.reg.GetHighReg()) ||
+ IsPromoted(rl_src.reg.GetReg()) ||
+ IsPromoted(rl_src.reg.GetHighReg()) ||
(rl_dest.location == kLocPhysReg)) {
// Src is live or promoted or Dest has assigned reg.
rl_dest = EvalLoc(rl_dest, kAnyReg, false);
- OpRegCopyWide(rl_dest.low_reg, rl_dest.high_reg,
- rl_src.low_reg, rl_src.high_reg);
+ OpRegCopyWide(rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg(),
+ rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
} else {
// Just re-assign the registers. Dest gets Src's regs
- rl_dest.low_reg = rl_src.low_reg;
- rl_dest.high_reg = rl_src.high_reg;
- Clobber(rl_src.low_reg);
- Clobber(rl_src.high_reg);
+ rl_dest.reg = rl_src.reg;
+ Clobber(rl_src.reg.GetReg());
+ Clobber(rl_src.reg.GetHighReg());
}
} else {
// Load Src either into promoted Dest or temps allocated for Dest
rl_dest = EvalLoc(rl_dest, kAnyReg, false);
- LoadValueDirectWide(rl_src, rl_dest.low_reg, rl_dest.high_reg);
+ LoadValueDirectWide(rl_src, rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg());
}
// Dest is now live and dirty (until/if we flush it to home location)
- MarkLive(rl_dest.low_reg, rl_dest.s_reg_low);
+ MarkLive(rl_dest.reg.GetReg(), rl_dest.s_reg_low);
// Does this wide value live in two registers (or one vector register)?
- if (rl_dest.low_reg != rl_dest.high_reg) {
- MarkLive(rl_dest.high_reg, GetSRegHi(rl_dest.s_reg_low));
+ if (rl_dest.reg.GetReg() != rl_dest.reg.GetHighReg()) {
+ MarkLive(rl_dest.reg.GetHighReg(), GetSRegHi(rl_dest.s_reg_low));
MarkDirty(rl_dest);
- MarkPair(rl_dest.low_reg, rl_dest.high_reg);
+ MarkPair(rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg());
} else {
// This must be an x86 vector register value.
- DCHECK(IsFpReg(rl_dest.low_reg) && (cu_->instruction_set == kX86));
+ DCHECK(IsFpReg(rl_dest.reg.GetReg()) && (cu_->instruction_set == kX86));
MarkDirty(rl_dest);
}
ResetDefLocWide(rl_dest);
- if ((IsDirty(rl_dest.low_reg) ||
- IsDirty(rl_dest.high_reg)) &&
+ if ((IsDirty(rl_dest.reg.GetReg()) ||
+ IsDirty(rl_dest.reg.GetHighReg())) &&
(oat_live_out(rl_dest.s_reg_low) ||
oat_live_out(GetSRegHi(rl_dest.s_reg_low)))) {
def_start = last_lir_insn_;
DCHECK_EQ((mir_graph_->SRegToVReg(rl_dest.s_reg_low)+1),
mir_graph_->SRegToVReg(GetSRegHi(rl_dest.s_reg_low)));
StoreBaseDispWide(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low),
- rl_dest.low_reg, rl_dest.high_reg);
+ rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg());
MarkClean(rl_dest);
def_end = last_lir_insn_;
MarkDefWide(rl_dest, def_start, def_end);
@@ -298,25 +300,25 @@ void Mir2Lir::StoreFinalValue(RegLocation rl_dest, RegLocation rl_src) {
DCHECK_EQ(rl_src.location, kLocPhysReg);
if (rl_dest.location == kLocPhysReg) {
- OpRegCopy(rl_dest.low_reg, rl_src.low_reg);
+ OpRegCopy(rl_dest.reg.GetReg(), rl_src.reg.GetReg());
} else {
// Just re-assign the register. Dest gets Src's reg.
- rl_dest.low_reg = rl_src.low_reg;
rl_dest.location = kLocPhysReg;
- Clobber(rl_src.low_reg);
+ rl_dest.reg = rl_src.reg;
+ Clobber(rl_src.reg.GetReg());
}
// Dest is now live and dirty (until/if we flush it to home location)
- MarkLive(rl_dest.low_reg, rl_dest.s_reg_low);
+ MarkLive(rl_dest.reg.GetReg(), rl_dest.s_reg_low);
MarkDirty(rl_dest);
ResetDefLoc(rl_dest);
- if (IsDirty(rl_dest.low_reg) &&
+ if (IsDirty(rl_dest.reg.GetReg()) &&
oat_live_out(rl_dest.s_reg_low)) {
LIR *def_start = last_lir_insn_;
StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low),
- rl_dest.low_reg, kWord);
+ rl_dest.reg.GetReg(), kWord);
MarkClean(rl_dest);
LIR *def_end = last_lir_insn_;
if (!rl_dest.ref) {
@@ -327,46 +329,45 @@ void Mir2Lir::StoreFinalValue(RegLocation rl_dest, RegLocation rl_src) {
}
void Mir2Lir::StoreFinalValueWide(RegLocation rl_dest, RegLocation rl_src) {
- DCHECK_EQ(IsFpReg(rl_src.low_reg), IsFpReg(rl_src.high_reg));
+ DCHECK_EQ(IsFpReg(rl_src.reg.GetReg()), IsFpReg(rl_src.reg.GetHighReg()));
DCHECK(rl_dest.wide);
DCHECK(rl_src.wide);
DCHECK_EQ(rl_src.location, kLocPhysReg);
if (rl_dest.location == kLocPhysReg) {
- OpRegCopyWide(rl_dest.low_reg, rl_dest.high_reg, rl_src.low_reg, rl_src.high_reg);
+ OpRegCopyWide(rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg(), rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
} else {
// Just re-assign the registers. Dest gets Src's regs.
- rl_dest.low_reg = rl_src.low_reg;
- rl_dest.high_reg = rl_src.high_reg;
rl_dest.location = kLocPhysReg;
- Clobber(rl_src.low_reg);
- Clobber(rl_src.high_reg);
+ rl_dest.reg = rl_src.reg;
+ Clobber(rl_src.reg.GetReg());
+ Clobber(rl_src.reg.GetHighReg());
}
// Dest is now live and dirty (until/if we flush it to home location).
- MarkLive(rl_dest.low_reg, rl_dest.s_reg_low);
+ MarkLive(rl_dest.reg.GetReg(), rl_dest.s_reg_low);
// Does this wide value live in two registers (or one vector register)?
- if (rl_dest.low_reg != rl_dest.high_reg) {
- MarkLive(rl_dest.high_reg, GetSRegHi(rl_dest.s_reg_low));
+ if (rl_dest.reg.GetReg() != rl_dest.reg.GetHighReg()) {
+ MarkLive(rl_dest.reg.GetHighReg(), GetSRegHi(rl_dest.s_reg_low));
MarkDirty(rl_dest);
- MarkPair(rl_dest.low_reg, rl_dest.high_reg);
+ MarkPair(rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg());
} else {
// This must be an x86 vector register value.
- DCHECK(IsFpReg(rl_dest.low_reg) && (cu_->instruction_set == kX86));
+ DCHECK(IsFpReg(rl_dest.reg.GetReg()) && (cu_->instruction_set == kX86));
MarkDirty(rl_dest);
}
ResetDefLocWide(rl_dest);
- if ((IsDirty(rl_dest.low_reg) ||
- IsDirty(rl_dest.high_reg)) &&
+ if ((IsDirty(rl_dest.reg.GetReg()) ||
+ IsDirty(rl_dest.reg.GetHighReg())) &&
(oat_live_out(rl_dest.s_reg_low) ||
oat_live_out(GetSRegHi(rl_dest.s_reg_low)))) {
LIR *def_start = last_lir_insn_;
DCHECK_EQ((mir_graph_->SRegToVReg(rl_dest.s_reg_low)+1),
mir_graph_->SRegToVReg(GetSRegHi(rl_dest.s_reg_low)));
StoreBaseDispWide(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low),
- rl_dest.low_reg, rl_dest.high_reg);
+ rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg());
MarkClean(rl_dest);
LIR *def_end = last_lir_insn_;
MarkDefWide(rl_dest, def_start, def_end);
@@ -385,14 +386,13 @@ RegLocation Mir2Lir::LoadCurrMethod() {
RegLocation Mir2Lir::ForceTemp(RegLocation loc) {
DCHECK(!loc.wide);
DCHECK(loc.location == kLocPhysReg);
- DCHECK(!IsFpReg(loc.low_reg));
- DCHECK(!IsFpReg(loc.high_reg));
- if (IsTemp(loc.low_reg)) {
- Clobber(loc.low_reg);
+ DCHECK(!IsFpReg(loc.reg.GetReg()));
+ if (IsTemp(loc.reg.GetReg())) {
+ Clobber(loc.reg.GetReg());
} else {
int temp_low = AllocTemp();
- OpRegCopy(temp_low, loc.low_reg);
- loc.low_reg = temp_low;
+ OpRegCopy(temp_low, loc.reg.GetReg());
+ loc.reg.SetReg(temp_low);
}
// Ensure that this doesn't represent the original SR any more.
@@ -403,21 +403,21 @@ RegLocation Mir2Lir::ForceTemp(RegLocation loc) {
RegLocation Mir2Lir::ForceTempWide(RegLocation loc) {
DCHECK(loc.wide);
DCHECK(loc.location == kLocPhysReg);
- DCHECK(!IsFpReg(loc.low_reg));
- DCHECK(!IsFpReg(loc.high_reg));
- if (IsTemp(loc.low_reg)) {
- Clobber(loc.low_reg);
+ DCHECK(!IsFpReg(loc.reg.GetReg()));
+ DCHECK(!IsFpReg(loc.reg.GetHighReg()));
+ if (IsTemp(loc.reg.GetReg())) {
+ Clobber(loc.reg.GetReg());
} else {
int temp_low = AllocTemp();
- OpRegCopy(temp_low, loc.low_reg);
- loc.low_reg = temp_low;
+ OpRegCopy(temp_low, loc.reg.GetReg());
+ loc.reg.SetReg(temp_low);
}
- if (IsTemp(loc.high_reg)) {
- Clobber(loc.high_reg);
+ if (IsTemp(loc.reg.GetHighReg())) {
+ Clobber(loc.reg.GetHighReg());
} else {
int temp_high = AllocTemp();
- OpRegCopy(temp_high, loc.high_reg);
- loc.high_reg = temp_high;
+ OpRegCopy(temp_high, loc.reg.GetHighReg());
+ loc.reg.SetHighReg(temp_high);
}
// Ensure that this doesn't represent the original SR any more.
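
The hunks above swap direct low_reg/high_reg field access for the RegStorage accessors GetReg(), GetHighReg(), SetReg() and SetHighReg(). A minimal sketch of that accessor surface, assuming a simple kind tag plus two register numbers; the real class in compiler/dex/reg_storage.h packs the same information into a single value, so the layout here is illustrative only:

#include <cassert>

// Illustrative stand-in for art::RegStorage: a solo-vs-pair kind tag plus one
// or two register numbers. The real class encodes all of this in one word.
class RegStorageSketch {
 public:
  enum Kind { k32BitSolo, k64BitPair };
  RegStorageSketch(Kind kind, int reg) : kind_(kind), low_(reg), high_(-1) {}
  RegStorageSketch(Kind kind, int low, int high)
      : kind_(kind), low_(low), high_(high) {}
  int GetReg() const { return low_; }   // Low half, or the solo register.
  int GetHighReg() const {              // Only meaningful for pairs.
    assert(kind_ == k64BitPair);
    return high_;
  }
  void SetReg(int reg) { low_ = reg; }
  void SetHighReg(int reg) { assert(kind_ == k64BitPair); high_ = reg; }
 private:
  Kind kind_;
  int low_;
  int high_;
};

With this shape, the `rl_dest.reg = rl_src.reg` assignments in StoreValueWide move both halves at once, where the old code had to copy low_reg and high_reg separately.
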
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index a663519b82..88f46fd59a 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -114,7 +114,7 @@ void MipsMir2Lir::GenSparseSwitch(MIR* mir, DexOffset table_offset,
LIR* exit_branch = OpCmpBranch(kCondEq, rBase, rEnd, NULL);
LoadWordDisp(rBase, 0, r_key);
OpRegImm(kOpAdd, rBase, 8);
- OpCmpBranch(kCondNe, rl_src.low_reg, r_key, loop_label);
+ OpCmpBranch(kCondNe, rl_src.reg.GetReg(), r_key, loop_label);
int r_disp = AllocTemp();
LoadWordDisp(rBase, -4, r_disp);
OpRegRegReg(kOpAdd, r_RA, r_RA, r_disp);
@@ -162,7 +162,7 @@ void MipsMir2Lir::GenPackedSwitch(MIR* mir, DexOffset table_offset,
bool large_bias = false;
int r_key;
if (low_key == 0) {
- r_key = rl_src.low_reg;
+ r_key = rl_src.reg.GetReg();
} else if ((low_key & 0xffff) != low_key) {
r_key = AllocTemp();
LoadConstant(r_key, low_key);
@@ -179,9 +179,9 @@ void MipsMir2Lir::GenPackedSwitch(MIR* mir, DexOffset table_offset,
NewLIR0(kMipsNop);
} else {
if (large_bias) {
- OpRegRegReg(kOpSub, r_key, rl_src.low_reg, r_key);
+ OpRegRegReg(kOpSub, r_key, rl_src.reg.GetReg(), r_key);
} else {
- OpRegRegImm(kOpSub, r_key, rl_src.low_reg, low_key);
+ OpRegRegImm(kOpSub, r_key, rl_src.reg.GetReg(), low_key);
}
}
GenBarrier(); // Scheduling barrier
@@ -263,7 +263,7 @@ void MipsMir2Lir::GenMoveException(RegLocation rl_dest) {
int ex_offset = Thread::ExceptionOffset().Int32Value();
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
int reset_reg = AllocTemp();
- LoadWordDisp(rMIPS_SELF, ex_offset, rl_result.low_reg);
+ LoadWordDisp(rMIPS_SELF, ex_offset, rl_result.reg.GetReg());
LoadConstant(reset_reg, 0);
StoreWordDisp(rMIPS_SELF, ex_offset, reset_reg);
FreeTemp(reset_reg);
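
GenMoveException above is a load-then-clear of the thread's pending-exception slot. In plain C++ terms the emitted pair of memory operations behaves like this sketch, where ToyThread stands in for the real Thread object:

#include <cstdint>

struct ToyThread { uintptr_t exception_; };  // Stand-in for the real Thread.

// What GenMoveException emits: load the slot, then store zero through a
// scratch register so the pending exception is consumed exactly once.
uintptr_t MoveExceptionSketch(ToyThread* self) {
  uintptr_t result = self->exception_;  // LoadWordDisp(rMIPS_SELF, ex_offset, ...)
  self->exception_ = 0;                 // StoreWordDisp of the zeroed temp.
  return result;
}
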
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index dad8a3b492..61eb68dc21 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -49,7 +49,7 @@ class MipsMir2Lir : public Mir2Lir {
bool IsFpReg(int reg);
bool SameRegType(int reg1, int reg2);
int AllocTypedTemp(bool fp_hint, int reg_class);
- int AllocTypedTempPair(bool fp_hint, int reg_class);
+ RegStorage AllocTypedTempWide(bool fp_hint, int reg_class);
int S2d(int low_reg, int high_reg);
int TargetReg(SpecialTargetRegister reg);
int GetArgMappingToPhysicalReg(int arg_num);
diff --git a/compiler/dex/quick/mips/fp_mips.cc b/compiler/dex/quick/mips/fp_mips.cc
index 9e2fea94de..cf4f19f84c 100644
--- a/compiler/dex/quick/mips/fp_mips.cc
+++ b/compiler/dex/quick/mips/fp_mips.cc
@@ -64,7 +64,7 @@ void MipsMir2Lir::GenArithOpFloat(Instruction::Code opcode,
rl_src1 = LoadValue(rl_src1, kFPReg);
rl_src2 = LoadValue(rl_src2, kFPReg);
rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR3(op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
+ NewLIR3(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
StoreValue(rl_dest, rl_result);
}
@@ -111,8 +111,8 @@ void MipsMir2Lir::GenArithOpDouble(Instruction::Code opcode,
rl_result = EvalLoc(rl_dest, kFPReg, true);
DCHECK(rl_dest.wide);
DCHECK(rl_result.wide);
- NewLIR3(op, S2d(rl_result.low_reg, rl_result.high_reg), S2d(rl_src1.low_reg, rl_src1.high_reg),
- S2d(rl_src2.low_reg, rl_src2.high_reg));
+ NewLIR3(op, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), S2d(rl_src1.reg.GetReg(), rl_src1.reg.GetHighReg()),
+ S2d(rl_src2.reg.GetReg(), rl_src2.reg.GetHighReg()));
StoreValueWide(rl_dest, rl_result);
}
@@ -157,18 +157,18 @@ void MipsMir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest,
}
if (rl_src.wide) {
rl_src = LoadValueWide(rl_src, kFPReg);
- src_reg = S2d(rl_src.low_reg, rl_src.high_reg);
+ src_reg = S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
} else {
rl_src = LoadValue(rl_src, kFPReg);
- src_reg = rl_src.low_reg;
+ src_reg = rl_src.reg.GetReg();
}
if (rl_dest.wide) {
rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR2(op, S2d(rl_result.low_reg, rl_result.high_reg), src_reg);
+ NewLIR2(op, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), src_reg);
StoreValueWide(rl_dest, rl_result);
} else {
rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR2(op, rl_result.low_reg, src_reg);
+ NewLIR2(op, rl_result.reg.GetReg(), src_reg);
StoreValue(rl_dest, rl_result);
}
}
@@ -221,7 +221,7 @@ void MipsMir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src) {
RegLocation rl_result;
rl_src = LoadValue(rl_src, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegImm(kOpAdd, rl_result.low_reg, rl_src.low_reg, 0x80000000);
+ OpRegRegImm(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), 0x80000000);
StoreValue(rl_dest, rl_result);
}
@@ -229,8 +229,8 @@ void MipsMir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) {
RegLocation rl_result;
rl_src = LoadValueWide(rl_src, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegImm(kOpAdd, rl_result.high_reg, rl_src.high_reg, 0x80000000);
- OpRegCopy(rl_result.low_reg, rl_src.low_reg);
+ OpRegRegImm(kOpAdd, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), 0x80000000);
+ OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetReg());
StoreValueWide(rl_dest, rl_result);
}
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index 013041a9a5..fec801bb4a 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -47,13 +47,13 @@ void MipsMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
int t0 = AllocTemp();
int t1 = AllocTemp();
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- NewLIR3(kMipsSlt, t0, rl_src1.high_reg, rl_src2.high_reg);
- NewLIR3(kMipsSlt, t1, rl_src2.high_reg, rl_src1.high_reg);
- NewLIR3(kMipsSubu, rl_result.low_reg, t1, t0);
- LIR* branch = OpCmpImmBranch(kCondNe, rl_result.low_reg, 0, NULL);
- NewLIR3(kMipsSltu, t0, rl_src1.low_reg, rl_src2.low_reg);
- NewLIR3(kMipsSltu, t1, rl_src2.low_reg, rl_src1.low_reg);
- NewLIR3(kMipsSubu, rl_result.low_reg, t1, t0);
+ NewLIR3(kMipsSlt, t0, rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
+ NewLIR3(kMipsSlt, t1, rl_src2.reg.GetHighReg(), rl_src1.reg.GetHighReg());
+ NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1, t0);
+ LIR* branch = OpCmpImmBranch(kCondNe, rl_result.reg.GetReg(), 0, NULL);
+ NewLIR3(kMipsSltu, t0, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+ NewLIR3(kMipsSltu, t1, rl_src2.reg.GetReg(), rl_src1.reg.GetReg());
+ NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1, t0);
FreeTemp(t0);
FreeTemp(t1);
LIR* target = NewLIR0(kPseudoTargetLabel);
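
For reference, the slt/sltu sequence above computes the Dalvik cmp-long result from 32-bit halves: a signed compare of the high words decides unless they are equal, in which case an unsigned compare of the low words decides. The same computation in plain C++:

#include <cstdint>

// Mirrors the generated code: the two kMipsSlt ops plus kMipsSubu give the
// signed high-word comparison; the kCondNe branch skips the low-word pass
// whenever the highs already differ.
int32_t CmpLongSketch(int32_t a_hi, uint32_t a_lo, int32_t b_hi, uint32_t b_lo) {
  int32_t res = (b_hi < a_hi) - (a_hi < b_hi);  // t1 - t0 on the high words.
  if (res != 0) {
    return res;
  }
  return static_cast<int32_t>(b_lo < a_lo) - static_cast<int32_t>(a_lo < b_lo);
}
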
@@ -228,9 +228,9 @@ RegLocation MipsMir2Lir::GenDivRem(RegLocation rl_dest, int reg1, int reg2,
NewLIR4(kMipsDiv, r_HI, r_LO, reg1, reg2);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (is_div) {
- NewLIR2(kMipsMflo, rl_result.low_reg, r_LO);
+ NewLIR2(kMipsMflo, rl_result.reg.GetReg(), r_LO);
} else {
- NewLIR2(kMipsMfhi, rl_result.low_reg, r_HI);
+ NewLIR2(kMipsMfhi, rl_result.reg.GetReg(), r_HI);
}
return rl_result;
}
@@ -242,9 +242,9 @@ RegLocation MipsMir2Lir::GenDivRemLit(RegLocation rl_dest, int reg1, int lit,
NewLIR4(kMipsDiv, r_HI, r_LO, reg1, t_reg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (is_div) {
- NewLIR2(kMipsMflo, rl_result.low_reg, r_LO);
+ NewLIR2(kMipsMflo, rl_result.reg.GetReg(), r_LO);
} else {
- NewLIR2(kMipsMfhi, rl_result.low_reg, r_HI);
+ NewLIR2(kMipsMfhi, rl_result.reg.GetReg(), r_HI);
}
FreeTemp(t_reg);
return rl_result;
@@ -290,7 +290,7 @@ bool MipsMir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
DCHECK(size == kSignedByte);
- LoadBaseDisp(rl_address.low_reg, 0, rl_result.low_reg, size, INVALID_SREG);
+ LoadBaseDisp(rl_address.reg.GetReg(), 0, rl_result.reg.GetReg(), size, INVALID_SREG);
StoreValue(rl_dest, rl_result);
return true;
}
@@ -306,7 +306,7 @@ bool MipsMir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
DCHECK(size == kSignedByte);
RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
- StoreBaseDisp(rl_address.low_reg, 0, rl_value.low_reg, size);
+ StoreBaseDisp(rl_address.reg.GetReg(), 0, rl_value.reg.GetReg(), size);
return true;
}
@@ -329,11 +329,11 @@ void MipsMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
RegLocation rl_result, int lit,
int first_bit, int second_bit) {
int t_reg = AllocTemp();
- OpRegRegImm(kOpLsl, t_reg, rl_src.low_reg, second_bit - first_bit);
- OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, t_reg);
+ OpRegRegImm(kOpLsl, t_reg, rl_src.reg.GetReg(), second_bit - first_bit);
+ OpRegRegReg(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), t_reg);
FreeTemp(t_reg);
if (first_bit != 0) {
- OpRegRegImm(kOpLsl, rl_result.low_reg, rl_result.low_reg, first_bit);
+ OpRegRegImm(kOpLsl, rl_result.reg.GetReg(), rl_result.reg.GetReg(), first_bit);
}
}
@@ -385,11 +385,11 @@ void MipsMir2Lir::GenAddLong(Instruction::Code opcode, RegLocation rl_dest,
* addu v1,v1,t1
*/
- OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src2.low_reg, rl_src1.low_reg);
+ OpRegRegReg(kOpAdd, rl_result.reg.GetReg(), rl_src2.reg.GetReg(), rl_src1.reg.GetReg());
int t_reg = AllocTemp();
- OpRegRegReg(kOpAdd, t_reg, rl_src2.high_reg, rl_src1.high_reg);
- NewLIR3(kMipsSltu, rl_result.high_reg, rl_result.low_reg, rl_src2.low_reg);
- OpRegRegReg(kOpAdd, rl_result.high_reg, rl_result.high_reg, t_reg);
+ OpRegRegReg(kOpAdd, t_reg, rl_src2.reg.GetHighReg(), rl_src1.reg.GetHighReg());
+ NewLIR3(kMipsSltu, rl_result.reg.GetHighReg(), rl_result.reg.GetReg(), rl_src2.reg.GetReg());
+ OpRegRegReg(kOpAdd, rl_result.reg.GetHighReg(), rl_result.reg.GetHighReg(), t_reg);
FreeTemp(t_reg);
StoreValueWide(rl_dest, rl_result);
}
@@ -408,10 +408,10 @@ void MipsMir2Lir::GenSubLong(Instruction::Code opcode, RegLocation rl_dest,
*/
int t_reg = AllocTemp();
- NewLIR3(kMipsSltu, t_reg, rl_src1.low_reg, rl_src2.low_reg);
- OpRegRegReg(kOpSub, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
- OpRegRegReg(kOpSub, rl_result.high_reg, rl_src1.high_reg, rl_src2.high_reg);
- OpRegRegReg(kOpSub, rl_result.high_reg, rl_result.high_reg, t_reg);
+ NewLIR3(kMipsSltu, t_reg, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+ OpRegRegReg(kOpSub, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+ OpRegRegReg(kOpSub, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
+ OpRegRegReg(kOpSub, rl_result.reg.GetHighReg(), rl_result.reg.GetHighReg(), t_reg);
FreeTemp(t_reg);
StoreValueWide(rl_dest, rl_result);
}
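
The GenAddLong and GenSubLong sequences route carry and borrow through the kMipsSltu temporary. What the emitted code computes, written out with the word split explicit:

#include <cstdint>

// 64-bit add from 32-bit ops: sltu detects the carry out of the low words.
uint64_t AddLongSketch(uint32_t a_lo, uint32_t a_hi, uint32_t b_lo, uint32_t b_hi) {
  uint32_t res_lo = a_lo + b_lo;
  uint32_t carry = (res_lo < b_lo) ? 1u : 0u;  // kMipsSltu res_hi, res_lo, b_lo
  uint32_t res_hi = a_hi + b_hi + carry;
  return (static_cast<uint64_t>(res_hi) << 32) | res_lo;
}

// 64-bit subtract: sltu detects the borrow before the low words are changed.
uint64_t SubLongSketch(uint32_t a_lo, uint32_t a_hi, uint32_t b_lo, uint32_t b_hi) {
  uint32_t borrow = (a_lo < b_lo) ? 1u : 0u;   // kMipsSltu t_reg, a_lo, b_lo
  uint32_t res_lo = a_lo - b_lo;
  uint32_t res_hi = a_hi - b_hi - borrow;
  return (static_cast<uint64_t>(res_hi) << 32) | res_lo;
}
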
@@ -427,11 +427,11 @@ void MipsMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
* subu v1,v1,t1
*/
- OpRegReg(kOpNeg, rl_result.low_reg, rl_src.low_reg);
- OpRegReg(kOpNeg, rl_result.high_reg, rl_src.high_reg);
+ OpRegReg(kOpNeg, rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ OpRegReg(kOpNeg, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg());
int t_reg = AllocTemp();
- NewLIR3(kMipsSltu, t_reg, r_ZERO, rl_result.low_reg);
- OpRegRegReg(kOpSub, rl_result.high_reg, rl_result.high_reg, t_reg);
+ NewLIR3(kMipsSltu, t_reg, r_ZERO, rl_result.reg.GetReg());
+ OpRegRegReg(kOpSub, rl_result.reg.GetHighReg(), rl_result.reg.GetHighReg(), t_reg);
FreeTemp(t_reg);
StoreValueWide(rl_dest, rl_result);
}
@@ -471,7 +471,7 @@ void MipsMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
}
/* null object? */
- GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);
+ GenNullCheck(rl_array.s_reg_low, rl_array.reg.GetReg(), opt_flags);
int reg_ptr = AllocTemp();
bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
@@ -479,28 +479,28 @@ void MipsMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
if (needs_range_check) {
reg_len = AllocTemp();
/* Get len */
- LoadWordDisp(rl_array.low_reg, len_offset, reg_len);
+ LoadWordDisp(rl_array.reg.GetReg(), len_offset, reg_len);
}
/* reg_ptr -> array data */
- OpRegRegImm(kOpAdd, reg_ptr, rl_array.low_reg, data_offset);
- FreeTemp(rl_array.low_reg);
+ OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg.GetReg(), data_offset);
+ FreeTemp(rl_array.reg.GetReg());
if ((size == kLong) || (size == kDouble)) {
if (scale) {
int r_new_index = AllocTemp();
- OpRegRegImm(kOpLsl, r_new_index, rl_index.low_reg, scale);
+ OpRegRegImm(kOpLsl, r_new_index, rl_index.reg.GetReg(), scale);
OpRegReg(kOpAdd, reg_ptr, r_new_index);
FreeTemp(r_new_index);
} else {
- OpRegReg(kOpAdd, reg_ptr, rl_index.low_reg);
+ OpRegReg(kOpAdd, reg_ptr, rl_index.reg.GetReg());
}
- FreeTemp(rl_index.low_reg);
+ FreeTemp(rl_index.reg.GetReg());
rl_result = EvalLoc(rl_dest, reg_class, true);
if (needs_range_check) {
- GenRegRegCheck(kCondUge, rl_index.low_reg, reg_len, kThrowArrayBounds);
+ GenRegRegCheck(kCondUge, rl_index.reg.GetReg(), reg_len, kThrowArrayBounds);
FreeTemp(reg_len);
}
- LoadBaseDispWide(reg_ptr, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
+ LoadBaseDispWide(reg_ptr, 0, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), INVALID_SREG);
FreeTemp(reg_ptr);
StoreValueWide(rl_dest, rl_result);
@@ -508,10 +508,10 @@ void MipsMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
rl_result = EvalLoc(rl_dest, reg_class, true);
if (needs_range_check) {
- GenRegRegCheck(kCondUge, rl_index.low_reg, reg_len, kThrowArrayBounds);
+ GenRegRegCheck(kCondUge, rl_index.reg.GetReg(), reg_len, kThrowArrayBounds);
FreeTemp(reg_len);
}
- LoadBaseIndexed(reg_ptr, rl_index.low_reg, rl_result.low_reg, scale, size);
+ LoadBaseIndexed(reg_ptr, rl_index.reg.GetReg(), rl_result.reg.GetReg(), scale, size);
FreeTemp(reg_ptr);
StoreValue(rl_dest, rl_result);
@@ -538,17 +538,17 @@ void MipsMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
rl_index = LoadValue(rl_index, kCoreReg);
int reg_ptr = INVALID_REG;
bool allocated_reg_ptr_temp = false;
- if (IsTemp(rl_array.low_reg) && !card_mark) {
- Clobber(rl_array.low_reg);
- reg_ptr = rl_array.low_reg;
+ if (IsTemp(rl_array.reg.GetReg()) && !card_mark) {
+ Clobber(rl_array.reg.GetReg());
+ reg_ptr = rl_array.reg.GetReg();
} else {
reg_ptr = AllocTemp();
- OpRegCopy(reg_ptr, rl_array.low_reg);
+ OpRegCopy(reg_ptr, rl_array.reg.GetReg());
allocated_reg_ptr_temp = true;
}
/* null object? */
- GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);
+ GenNullCheck(rl_array.s_reg_low, rl_array.reg.GetReg(), opt_flags);
bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
int reg_len = INVALID_REG;
@@ -556,7 +556,7 @@ void MipsMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
reg_len = AllocTemp();
      // NOTE: max live temps (4) here.
/* Get len */
- LoadWordDisp(rl_array.low_reg, len_offset, reg_len);
+ LoadWordDisp(rl_array.reg.GetReg(), len_offset, reg_len);
}
/* reg_ptr -> array data */
OpRegImm(kOpAdd, reg_ptr, data_offset);
@@ -565,34 +565,34 @@ void MipsMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
// TUNING: specific wide routine that can handle fp regs
if (scale) {
int r_new_index = AllocTemp();
- OpRegRegImm(kOpLsl, r_new_index, rl_index.low_reg, scale);
+ OpRegRegImm(kOpLsl, r_new_index, rl_index.reg.GetReg(), scale);
OpRegReg(kOpAdd, reg_ptr, r_new_index);
FreeTemp(r_new_index);
} else {
- OpRegReg(kOpAdd, reg_ptr, rl_index.low_reg);
+ OpRegReg(kOpAdd, reg_ptr, rl_index.reg.GetReg());
}
rl_src = LoadValueWide(rl_src, reg_class);
if (needs_range_check) {
- GenRegRegCheck(kCondUge, rl_index.low_reg, reg_len, kThrowArrayBounds);
+ GenRegRegCheck(kCondUge, rl_index.reg.GetReg(), reg_len, kThrowArrayBounds);
FreeTemp(reg_len);
}
- StoreBaseDispWide(reg_ptr, 0, rl_src.low_reg, rl_src.high_reg);
+ StoreBaseDispWide(reg_ptr, 0, rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
} else {
rl_src = LoadValue(rl_src, reg_class);
if (needs_range_check) {
- GenRegRegCheck(kCondUge, rl_index.low_reg, reg_len, kThrowArrayBounds);
+ GenRegRegCheck(kCondUge, rl_index.reg.GetReg(), reg_len, kThrowArrayBounds);
FreeTemp(reg_len);
}
- StoreBaseIndexed(reg_ptr, rl_index.low_reg, rl_src.low_reg,
+ StoreBaseIndexed(reg_ptr, rl_index.reg.GetReg(), rl_src.reg.GetReg(),
scale, size);
}
if (allocated_reg_ptr_temp) {
FreeTemp(reg_ptr);
}
if (card_mark) {
- MarkGCCard(rl_src.low_reg, rl_array.low_reg);
+ MarkGCCard(rl_src.reg.GetReg(), rl_array.reg.GetReg());
}
}
diff --git a/compiler/dex/quick/mips/mips_lir.h b/compiler/dex/quick/mips/mips_lir.h
index 00eef96cb5..59f442c61a 100644
--- a/compiler/dex/quick/mips/mips_lir.h
+++ b/compiler/dex/quick/mips/mips_lir.h
@@ -141,16 +141,6 @@ namespace art {
#define rMIPS_LR INVALID_REG
#define rMIPS_PC INVALID_REG
-// RegisterLocation templates return values (r_V0, or r_V0/r_V1).
-#define MIPS_LOC_C_RETURN {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, r_V0, INVALID_REG, \
- INVALID_SREG, INVALID_SREG}
-#define MIPS_LOC_C_RETURN_FLOAT {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, r_FRESULT0, \
- INVALID_REG, INVALID_SREG, INVALID_SREG}
-#define MIPS_LOC_C_RETURN_WIDE {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, r_RESULT0, \
- r_RESULT1, INVALID_SREG, INVALID_SREG}
-#define MIPS_LOC_C_RETURN_DOUBLE {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, r_FRESULT0, \
- r_FRESULT1, INVALID_SREG, INVALID_SREG}
-
enum MipsResourceEncodingPos {
kMipsGPReg0 = 0,
kMipsRegSP = 29,
@@ -279,6 +269,20 @@ enum MipsNativeRegisterPool {
#define rMIPS_INVOKE_TGT r_T9
#define rMIPS_COUNT INVALID_REG
+// RegLocation templates for return values (r_V0, or r_V0/r_V1).
+const RegLocation mips_loc_c_return
+ {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
+ RegStorage(RegStorage::k32BitSolo, r_V0), INVALID_SREG, INVALID_SREG};
+const RegLocation mips_loc_c_return_wide
+ {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
+ RegStorage(RegStorage::k64BitPair, r_V0, r_V1), INVALID_SREG, INVALID_SREG};
+const RegLocation mips_loc_c_return_float
+ {kLocPhysReg, 0, 0, 0, 1, 0, 0, 0, 1, kVectorNotUsed,
+ RegStorage(RegStorage::k32BitSolo, r_F0), INVALID_SREG, INVALID_SREG};
+const RegLocation mips_loc_c_return_double
+ {kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1, kVectorNotUsed,
+ RegStorage(RegStorage::k64BitPair, r_F0, r_F1), INVALID_SREG, INVALID_SREG};
+
enum MipsShiftEncodings {
kMipsLsl = 0x0,
kMipsLsr = 0x1,
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
index 224e8f21f2..85c250da0f 100644
--- a/compiler/dex/quick/mips/target_mips.cc
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -40,23 +40,19 @@ static int fp_temps[] = {r_F0, r_F1, r_F2, r_F3, r_F4, r_F5, r_F6, r_F7,
r_F8, r_F9, r_F10, r_F11, r_F12, r_F13, r_F14, r_F15};
RegLocation MipsMir2Lir::LocCReturn() {
- RegLocation res = MIPS_LOC_C_RETURN;
- return res;
+ return mips_loc_c_return;
}
RegLocation MipsMir2Lir::LocCReturnWide() {
- RegLocation res = MIPS_LOC_C_RETURN_WIDE;
- return res;
+ return mips_loc_c_return_wide;
}
RegLocation MipsMir2Lir::LocCReturnFloat() {
- RegLocation res = MIPS_LOC_C_RETURN_FLOAT;
- return res;
+ return mips_loc_c_return_float;
}
RegLocation MipsMir2Lir::LocCReturnDouble() {
- RegLocation res = MIPS_LOC_C_RETURN_DOUBLE;
- return res;
+ return mips_loc_c_return_double;
}
// Return a target-dependent special register.
@@ -441,27 +437,20 @@ void MipsMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
#endif
}
-/*
- * Alloc a pair of core registers, or a double. Low reg in low byte,
- * high reg in next byte.
- */
-int MipsMir2Lir::AllocTypedTempPair(bool fp_hint,
- int reg_class) {
+// Alloc a pair of core registers, or a double.
+RegStorage MipsMir2Lir::AllocTypedTempWide(bool fp_hint, int reg_class) {
int high_reg;
int low_reg;
- int res = 0;
if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
low_reg = AllocTempDouble();
high_reg = low_reg + 1;
- res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
- return res;
+ return RegStorage(RegStorage::k64BitPair, low_reg, high_reg);
}
low_reg = AllocTemp();
high_reg = AllocTemp();
- res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
- return res;
+ return RegStorage(RegStorage::k64BitPair, low_reg, high_reg);
}
int MipsMir2Lir::AllocTypedTemp(bool fp_hint, int reg_class) {
@@ -505,11 +494,11 @@ void MipsMir2Lir::CompilerInitializeRegAlloc() {
}
void MipsMir2Lir::FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free) {
- if ((rl_free.low_reg != rl_keep.low_reg) && (rl_free.low_reg != rl_keep.high_reg) &&
- (rl_free.high_reg != rl_keep.low_reg) && (rl_free.high_reg != rl_keep.high_reg)) {
+ if ((rl_free.reg.GetReg() != rl_keep.reg.GetReg()) && (rl_free.reg.GetReg() != rl_keep.reg.GetHighReg()) &&
+ (rl_free.reg.GetHighReg() != rl_keep.reg.GetReg()) && (rl_free.reg.GetHighReg() != rl_keep.reg.GetHighReg())) {
// No overlap, free both
- FreeTemp(rl_free.low_reg);
- FreeTemp(rl_free.high_reg);
+ FreeTemp(rl_free.reg.GetReg());
+ FreeTemp(rl_free.reg.GetHighReg());
}
}
/*
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 8c2ed3667b..d9b241e864 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -53,8 +53,9 @@ int Mir2Lir::LoadArg(int in_position, bool wide) {
if (wide && reg_arg_high == INVALID_REG) {
// If the low part is not in a reg, we allocate a pair. Otherwise, we just load to high reg.
if (reg_arg_low == INVALID_REG) {
- int new_regs = AllocTypedTempPair(false, kAnyReg);
- DECODE_REG_PAIR(new_regs, reg_arg_low, reg_arg_high);
+ RegStorage new_regs = AllocTypedTempWide(false, kAnyReg);
+ reg_arg_low = new_regs.GetReg();
+ reg_arg_high = new_regs.GetHighReg();
LoadBaseDispWide(TargetReg(kSp), offset, reg_arg_low, reg_arg_high, INVALID_SREG);
} else {
reg_arg_high = AllocTemp();
@@ -70,6 +71,7 @@ int Mir2Lir::LoadArg(int in_position, bool wide) {
}
if (wide) {
+ // TODO: replace w/ RegStorage.
return ENCODE_REG_PAIR(reg_arg_low, reg_arg_high);
} else {
return reg_arg_low;
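
LoadArg still returns a wide result through ENCODE_REG_PAIR, hence the TODO above. Judging by the packing removed from AllocTypedTempPair in target_mips.cc (one register number per byte of an int), the macro pair presumably amounts to the following; treat this expansion as an assumption:

// Assumed expansion, matching the removed `(low_reg & 0xff) | ((high_reg & 0xff) << 8)`.
inline int EncodeRegPairSketch(int low_reg, int high_reg) {
  return (low_reg & 0xff) | ((high_reg & 0xff) << 8);
}

inline void DecodeRegPairSketch(int pair, int* low_reg, int* high_reg) {
  *low_reg = pair & 0xff;
  *high_reg = (pair >> 8) & 0xff;
}
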
@@ -90,25 +92,25 @@ void Mir2Lir::LoadArgDirect(int in_position, RegLocation rl_dest) {
if (!rl_dest.wide) {
int reg = GetArgMappingToPhysicalReg(in_position);
if (reg != INVALID_REG) {
- OpRegCopy(rl_dest.low_reg, reg);
+ OpRegCopy(rl_dest.reg.GetReg(), reg);
} else {
- LoadWordDisp(TargetReg(kSp), offset, rl_dest.low_reg);
+ LoadWordDisp(TargetReg(kSp), offset, rl_dest.reg.GetReg());
}
} else {
int reg_arg_low = GetArgMappingToPhysicalReg(in_position);
int reg_arg_high = GetArgMappingToPhysicalReg(in_position + 1);
if (reg_arg_low != INVALID_REG && reg_arg_high != INVALID_REG) {
- OpRegCopyWide(rl_dest.low_reg, rl_dest.high_reg, reg_arg_low, reg_arg_high);
+ OpRegCopyWide(rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg(), reg_arg_low, reg_arg_high);
} else if (reg_arg_low != INVALID_REG && reg_arg_high == INVALID_REG) {
- OpRegCopy(rl_dest.low_reg, reg_arg_low);
+ OpRegCopy(rl_dest.reg.GetReg(), reg_arg_low);
int offset_high = offset + sizeof(uint32_t);
- LoadWordDisp(TargetReg(kSp), offset_high, rl_dest.high_reg);
+ LoadWordDisp(TargetReg(kSp), offset_high, rl_dest.reg.GetHighReg());
} else if (reg_arg_low == INVALID_REG && reg_arg_high != INVALID_REG) {
- OpRegCopy(rl_dest.high_reg, reg_arg_high);
- LoadWordDisp(TargetReg(kSp), offset, rl_dest.low_reg);
+ OpRegCopy(rl_dest.reg.GetHighReg(), reg_arg_high);
+ LoadWordDisp(TargetReg(kSp), offset, rl_dest.reg.GetReg());
} else {
- LoadBaseDispWide(TargetReg(kSp), offset, rl_dest.low_reg, rl_dest.high_reg, INVALID_SREG);
+ LoadBaseDispWide(TargetReg(kSp), offset, rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg(), INVALID_SREG);
}
}
}
@@ -131,9 +133,9 @@ bool Mir2Lir::GenSpecialIGet(MIR* mir, const InlineMethod& special) {
RegLocation rl_dest = wide ? GetReturnWide(double_or_float) : GetReturn(double_or_float);
int reg_obj = LoadArg(data.object_arg);
if (wide) {
- LoadBaseDispWide(reg_obj, data.field_offset, rl_dest.low_reg, rl_dest.high_reg, INVALID_SREG);
+ LoadBaseDispWide(reg_obj, data.field_offset, rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg(), INVALID_SREG);
} else {
- LoadBaseDisp(reg_obj, data.field_offset, rl_dest.low_reg, kWord, INVALID_SREG);
+ LoadBaseDisp(reg_obj, data.field_offset, rl_dest.reg.GetReg(), kWord, INVALID_SREG);
}
if (data.is_volatile) {
GenMemBarrier(kLoadLoad);
@@ -210,7 +212,7 @@ bool Mir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& speci
successful = true;
RegLocation rl_dest = GetReturn(cu_->shorty[0] == 'F');
GenPrintLabel(mir);
- LoadConstant(rl_dest.low_reg, static_cast<int>(special.d.data));
+ LoadConstant(rl_dest.reg.GetReg(), static_cast<int>(special.d.data));
return_mir = mir_graph_->GetNextUnconditionalMir(bb, mir);
break;
}
@@ -231,6 +233,11 @@ bool Mir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& speci
}
if (successful) {
+ if (kIsDebugBuild) {
+ // Clear unreachable catch entries.
+ mir_graph_->catches_.clear();
+ }
+
// Handle verbosity for return MIR.
if (return_mir != nullptr) {
current_dalvik_offset_ = return_mir->offset;
@@ -371,19 +378,19 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
case Instruction::CONST_4:
case Instruction::CONST_16:
rl_result = EvalLoc(rl_dest, kAnyReg, true);
- LoadConstantNoClobber(rl_result.low_reg, vB);
+ LoadConstantNoClobber(rl_result.reg.GetReg(), vB);
StoreValue(rl_dest, rl_result);
if (vB == 0) {
- Workaround7250540(rl_dest, rl_result.low_reg);
+ Workaround7250540(rl_dest, rl_result.reg.GetReg());
}
break;
case Instruction::CONST_HIGH16:
rl_result = EvalLoc(rl_dest, kAnyReg, true);
- LoadConstantNoClobber(rl_result.low_reg, vB << 16);
+ LoadConstantNoClobber(rl_result.reg.GetReg(), vB << 16);
StoreValue(rl_dest, rl_result);
if (vB == 0) {
- Workaround7250540(rl_dest, rl_result.low_reg);
+ Workaround7250540(rl_dest, rl_result.reg.GetReg());
}
break;
@@ -398,7 +405,7 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
case Instruction::CONST_WIDE_HIGH16:
rl_result = EvalLoc(rl_dest, kAnyReg, true);
- LoadConstantWide(rl_result.low_reg, rl_result.high_reg,
+ LoadConstantWide(rl_result.reg.GetReg(), rl_result.reg.GetHighReg(),
static_cast<int64_t>(vB) << 48);
StoreValueWide(rl_dest, rl_result);
break;
@@ -431,9 +438,9 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
int len_offset;
len_offset = mirror::Array::LengthOffset().Int32Value();
rl_src[0] = LoadValue(rl_src[0], kCoreReg);
- GenNullCheck(rl_src[0].s_reg_low, rl_src[0].low_reg, opt_flags);
+ GenNullCheck(rl_src[0].s_reg_low, rl_src[0].reg.GetReg(), opt_flags);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- LoadWordDisp(rl_src[0].low_reg, len_offset, rl_result.low_reg);
+ LoadWordDisp(rl_src[0].reg.GetReg(), len_offset, rl_result.reg.GetReg());
StoreValue(rl_dest, rl_result);
break;
@@ -596,72 +603,72 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
break;
case Instruction::IGET_OBJECT:
- GenIGet(vC, opt_flags, kWord, rl_dest, rl_src[0], false, true);
+ GenIGet(mir, opt_flags, kWord, rl_dest, rl_src[0], false, true);
break;
case Instruction::IGET_WIDE:
- GenIGet(vC, opt_flags, kLong, rl_dest, rl_src[0], true, false);
+ GenIGet(mir, opt_flags, kLong, rl_dest, rl_src[0], true, false);
break;
case Instruction::IGET:
- GenIGet(vC, opt_flags, kWord, rl_dest, rl_src[0], false, false);
+ GenIGet(mir, opt_flags, kWord, rl_dest, rl_src[0], false, false);
break;
case Instruction::IGET_CHAR:
- GenIGet(vC, opt_flags, kUnsignedHalf, rl_dest, rl_src[0], false, false);
+ GenIGet(mir, opt_flags, kUnsignedHalf, rl_dest, rl_src[0], false, false);
break;
case Instruction::IGET_SHORT:
- GenIGet(vC, opt_flags, kSignedHalf, rl_dest, rl_src[0], false, false);
+ GenIGet(mir, opt_flags, kSignedHalf, rl_dest, rl_src[0], false, false);
break;
case Instruction::IGET_BOOLEAN:
case Instruction::IGET_BYTE:
- GenIGet(vC, opt_flags, kUnsignedByte, rl_dest, rl_src[0], false, false);
+ GenIGet(mir, opt_flags, kUnsignedByte, rl_dest, rl_src[0], false, false);
break;
case Instruction::IPUT_WIDE:
- GenIPut(vC, opt_flags, kLong, rl_src[0], rl_src[1], true, false);
+ GenIPut(mir, opt_flags, kLong, rl_src[0], rl_src[1], true, false);
break;
case Instruction::IPUT_OBJECT:
- GenIPut(vC, opt_flags, kWord, rl_src[0], rl_src[1], false, true);
+ GenIPut(mir, opt_flags, kWord, rl_src[0], rl_src[1], false, true);
break;
case Instruction::IPUT:
- GenIPut(vC, opt_flags, kWord, rl_src[0], rl_src[1], false, false);
+ GenIPut(mir, opt_flags, kWord, rl_src[0], rl_src[1], false, false);
break;
case Instruction::IPUT_BOOLEAN:
case Instruction::IPUT_BYTE:
- GenIPut(vC, opt_flags, kUnsignedByte, rl_src[0], rl_src[1], false, false);
+ GenIPut(mir, opt_flags, kUnsignedByte, rl_src[0], rl_src[1], false, false);
break;
case Instruction::IPUT_CHAR:
- GenIPut(vC, opt_flags, kUnsignedHalf, rl_src[0], rl_src[1], false, false);
+ GenIPut(mir, opt_flags, kUnsignedHalf, rl_src[0], rl_src[1], false, false);
break;
case Instruction::IPUT_SHORT:
- GenIPut(vC, opt_flags, kSignedHalf, rl_src[0], rl_src[1], false, false);
+ GenIPut(mir, opt_flags, kSignedHalf, rl_src[0], rl_src[1], false, false);
break;
case Instruction::SGET_OBJECT:
- GenSget(vB, rl_dest, false, true);
+ GenSget(mir, rl_dest, false, true);
break;
case Instruction::SGET:
case Instruction::SGET_BOOLEAN:
case Instruction::SGET_BYTE:
case Instruction::SGET_CHAR:
case Instruction::SGET_SHORT:
- GenSget(vB, rl_dest, false, false);
+ GenSget(mir, rl_dest, false, false);
break;
case Instruction::SGET_WIDE:
- GenSget(vB, rl_dest, true, false);
+ GenSget(mir, rl_dest, true, false);
break;
case Instruction::SPUT_OBJECT:
- GenSput(vB, rl_src[0], false, true);
+ GenSput(mir, rl_src[0], false, true);
break;
case Instruction::SPUT:
@@ -669,11 +676,11 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
case Instruction::SPUT_BYTE:
case Instruction::SPUT_CHAR:
case Instruction::SPUT_SHORT:
- GenSput(vB, rl_src[0], false, false);
+ GenSput(mir, rl_src[0], false, false);
break;
case Instruction::SPUT_WIDE:
- GenSput(vB, rl_src[0], true, false);
+ GenSput(mir, rl_src[0], true, false);
break;
case Instruction::INVOKE_STATIC_RANGE:
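
The Gen{IGet,IPut,Sget,Sput} calls above now receive the MIR itself rather than a bare field index, so the generators can consult field data resolved ahead of time (the diffstat adds dex/mir_field_info for this). A hedged sketch of what such a cached record might carry; the name and fields below are assumptions, not the real MirFieldInfo API:

#include <cstdint>

// Hypothetical shape of per-MIR field lowering data. The point of the
// signature change is that the code generator can consult a record like this
// instead of re-resolving the field from its index on every instruction.
struct FieldLoweringInfoSketch {
  uint32_t field_idx;    // Original index from the dex instruction.
  int32_t field_offset;  // Byte offset if resolved, -1 otherwise.
  bool is_volatile;      // Forces memory barriers around the access.
  bool fast_path;        // True if no runtime helper call is needed.
};
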
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index b74052c117..9e0e29995e 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -21,6 +21,7 @@
#include "compiled_method.h"
#include "dex/compiler_enums.h"
#include "dex/compiler_ir.h"
+#include "dex/reg_storage.h"
#include "dex/backend.h"
#include "driver/compiler_driver.h"
#include "leb128.h"
@@ -395,7 +396,6 @@ class Mir2Lir : public Backend {
virtual void Materialize();
virtual CompiledMethod* GetCompiledMethod();
void MarkSafepointPC(LIR* inst);
- bool FastInstance(uint32_t field_idx, bool is_put, int* field_offset, bool* is_volatile);
void SetupResourceMasks(LIR* lir);
void SetMemRefType(LIR* lir, bool is_load, int mem_type);
void AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load, bool is64bit);
@@ -425,6 +425,7 @@ class Mir2Lir : public Backend {
bool EvaluateBranch(Instruction::Code opcode, int src1, int src2);
bool IsInexpensiveConstant(RegLocation rl_src);
ConditionCode FlipComparisonOrder(ConditionCode before);
+ ConditionCode NegateComparison(ConditionCode before);
virtual void InstallLiteralPools();
void InstallSwitchTables();
void InstallFillArrayData();
@@ -558,13 +559,13 @@ class Mir2Lir : public Backend {
void GenNewArray(uint32_t type_idx, RegLocation rl_dest,
RegLocation rl_src);
void GenFilledNewArray(CallInfo* info);
- void GenSput(uint32_t field_idx, RegLocation rl_src,
+ void GenSput(MIR* mir, RegLocation rl_src,
bool is_long_or_double, bool is_object);
- void GenSget(uint32_t field_idx, RegLocation rl_dest,
+ void GenSget(MIR* mir, RegLocation rl_dest,
bool is_long_or_double, bool is_object);
- void GenIGet(uint32_t field_idx, int opt_flags, OpSize size,
+ void GenIGet(MIR* mir, int opt_flags, OpSize size,
RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double, bool is_object);
- void GenIPut(uint32_t field_idx, int opt_flags, OpSize size,
+ void GenIPut(MIR* mir, int opt_flags, OpSize size,
RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double, bool is_object);
void GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index,
RegLocation rl_src);
@@ -813,7 +814,7 @@ class Mir2Lir : public Backend {
virtual bool IsFpReg(int reg) = 0;
virtual bool SameRegType(int reg1, int reg2) = 0;
virtual int AllocTypedTemp(bool fp_hint, int reg_class) = 0;
- virtual int AllocTypedTempPair(bool fp_hint, int reg_class) = 0;
+ virtual RegStorage AllocTypedTempWide(bool fp_hint, int reg_class) = 0;
virtual int S2d(int low_reg, int high_reg) = 0;
virtual int TargetReg(SpecialTargetRegister reg) = 0;
virtual int GetArgMappingToPhysicalReg(int arg_num) = 0;
@@ -1089,6 +1090,11 @@ class Mir2Lir : public Backend {
bool can_assume_type_is_in_dex_cache,
uint32_t type_idx, RegLocation rl_dest,
RegLocation rl_src);
+ /*
+ * @brief Generate the debug_frame FDE information if possible.
+   * @returns pointer to vector containing CFI information, or NULL.
+ */
+ virtual std::vector<uint8_t>* ReturnCallFrameInformation();
/**
* @brief Used to insert marker that can be used to associate MIR with LIR.
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 0a651713ab..3a8942e46e 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -460,7 +460,7 @@ void Mir2Lir::MarkDef(RegLocation rl, LIR *start, LIR *finish) {
DCHECK(!rl.wide);
DCHECK(start && start->next);
DCHECK(finish);
- RegisterInfo* p = GetRegInfo(rl.low_reg);
+ RegisterInfo* p = GetRegInfo(rl.reg.GetReg());
p->def_start = start->next;
p->def_end = finish;
}
@@ -474,8 +474,8 @@ void Mir2Lir::MarkDefWide(RegLocation rl, LIR *start, LIR *finish) {
DCHECK(rl.wide);
DCHECK(start && start->next);
DCHECK(finish);
- RegisterInfo* p = GetRegInfo(rl.low_reg);
- ResetDef(rl.high_reg); // Only track low of pair
+ RegisterInfo* p = GetRegInfo(rl.reg.GetReg());
+ ResetDef(rl.reg.GetHighReg()); // Only track low of pair
p->def_start = start->next;
p->def_end = finish;
}
@@ -483,8 +483,8 @@ void Mir2Lir::MarkDefWide(RegLocation rl, LIR *start, LIR *finish) {
RegLocation Mir2Lir::WideToNarrow(RegLocation rl) {
DCHECK(rl.wide);
if (rl.location == kLocPhysReg) {
- RegisterInfo* info_lo = GetRegInfo(rl.low_reg);
- RegisterInfo* info_hi = GetRegInfo(rl.high_reg);
+ RegisterInfo* info_lo = GetRegInfo(rl.reg.GetReg());
+ RegisterInfo* info_hi = GetRegInfo(rl.reg.GetHighReg());
if (info_lo->is_temp) {
info_lo->pair = false;
info_lo->def_start = NULL;
@@ -502,18 +502,18 @@ RegLocation Mir2Lir::WideToNarrow(RegLocation rl) {
void Mir2Lir::ResetDefLoc(RegLocation rl) {
DCHECK(!rl.wide);
- RegisterInfo* p = IsTemp(rl.low_reg);
+ RegisterInfo* p = IsTemp(rl.reg.GetReg());
if (p && !(cu_->disable_opt & (1 << kSuppressLoads))) {
DCHECK(!p->pair);
NullifyRange(p->def_start, p->def_end, p->s_reg, rl.s_reg_low);
}
- ResetDef(rl.low_reg);
+ ResetDef(rl.reg.GetReg());
}
void Mir2Lir::ResetDefLocWide(RegLocation rl) {
DCHECK(rl.wide);
- RegisterInfo* p_low = IsTemp(rl.low_reg);
- RegisterInfo* p_high = IsTemp(rl.high_reg);
+ RegisterInfo* p_low = IsTemp(rl.reg.GetReg());
+ RegisterInfo* p_high = IsTemp(rl.reg.GetHighReg());
if (p_low && !(cu_->disable_opt & (1 << kSuppressLoads))) {
DCHECK(p_low->pair);
NullifyRange(p_low->def_start, p_low->def_end, p_low->s_reg, rl.s_reg_low);
@@ -521,8 +521,8 @@ void Mir2Lir::ResetDefLocWide(RegLocation rl) {
if (p_high && !(cu_->disable_opt & (1 << kSuppressLoads))) {
DCHECK(p_high->pair);
}
- ResetDef(rl.low_reg);
- ResetDef(rl.high_reg);
+ ResetDef(rl.reg.GetReg());
+ ResetDef(rl.reg.GetHighReg());
}
void Mir2Lir::ResetDefTracking() {
@@ -621,10 +621,10 @@ void Mir2Lir::MarkPair(int low_reg, int high_reg) {
}
void Mir2Lir::MarkClean(RegLocation loc) {
- RegisterInfo* info = GetRegInfo(loc.low_reg);
+ RegisterInfo* info = GetRegInfo(loc.reg.GetReg());
info->dirty = false;
if (loc.wide) {
- info = GetRegInfo(loc.high_reg);
+ info = GetRegInfo(loc.reg.GetHighReg());
info->dirty = false;
}
}
@@ -634,10 +634,10 @@ void Mir2Lir::MarkDirty(RegLocation loc) {
// If already home, can't be dirty
return;
}
- RegisterInfo* info = GetRegInfo(loc.low_reg);
+ RegisterInfo* info = GetRegInfo(loc.reg.GetReg());
info->dirty = true;
if (loc.wide) {
- info = GetRegInfo(loc.high_reg);
+ info = GetRegInfo(loc.reg.GetHighReg());
info->dirty = true;
}
}
@@ -707,7 +707,7 @@ RegLocation Mir2Lir::UpdateLoc(RegLocation loc) {
Clobber(info_lo->partner);
FreeTemp(info_lo->reg);
} else {
- loc.low_reg = info_lo->reg;
+ loc.reg = RegStorage(RegStorage::k32BitSolo, info_lo->reg);
loc.location = kLocPhysReg;
}
}
@@ -744,11 +744,10 @@ RegLocation Mir2Lir::UpdateLocWide(RegLocation loc) {
}
if (match) {
// Can reuse - update the register usage info
- loc.low_reg = info_lo->reg;
- loc.high_reg = info_hi->reg;
loc.location = kLocPhysReg;
- MarkPair(loc.low_reg, loc.high_reg);
- DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
+ loc.reg = RegStorage(RegStorage::k64BitPair, info_lo->reg, info_hi->reg);
+ MarkPair(loc.reg.GetReg(), loc.reg.GetHighReg());
+ DCHECK(!IsFpReg(loc.reg.GetReg()) || ((loc.reg.GetReg() & 0x1) == 0));
return loc;
}
// Can't easily reuse - clobber and free any overlaps
@@ -779,7 +778,6 @@ RegLocation Mir2Lir::UpdateRawLoc(RegLocation loc) {
RegLocation Mir2Lir::EvalLocWide(RegLocation loc, int reg_class, bool update) {
DCHECK(loc.wide);
- int32_t new_regs;
int32_t low_reg;
int32_t high_reg;
@@ -787,22 +785,21 @@ RegLocation Mir2Lir::EvalLocWide(RegLocation loc, int reg_class, bool update) {
/* If already in registers, we can assume proper form. Right reg class? */
if (loc.location == kLocPhysReg) {
- DCHECK_EQ(IsFpReg(loc.low_reg), IsFpReg(loc.high_reg));
- DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
- if (!RegClassMatches(reg_class, loc.low_reg)) {
+ DCHECK_EQ(IsFpReg(loc.reg.GetReg()), IsFpReg(loc.reg.GetHighReg()));
+ DCHECK(!IsFpReg(loc.reg.GetReg()) || ((loc.reg.GetReg() & 0x1) == 0));
+ if (!RegClassMatches(reg_class, loc.reg.GetReg())) {
/* Wrong register class. Reallocate and copy */
- new_regs = AllocTypedTempPair(loc.fp, reg_class);
- low_reg = new_regs & 0xff;
- high_reg = (new_regs >> 8) & 0xff;
- OpRegCopyWide(low_reg, high_reg, loc.low_reg, loc.high_reg);
- CopyRegInfo(low_reg, loc.low_reg);
- CopyRegInfo(high_reg, loc.high_reg);
- Clobber(loc.low_reg);
- Clobber(loc.high_reg);
- loc.low_reg = low_reg;
- loc.high_reg = high_reg;
- MarkPair(loc.low_reg, loc.high_reg);
- DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
+ RegStorage new_regs = AllocTypedTempWide(loc.fp, reg_class);
+ low_reg = new_regs.GetReg();
+ high_reg = new_regs.GetHighReg();
+ OpRegCopyWide(low_reg, high_reg, loc.reg.GetReg(), loc.reg.GetHighReg());
+ CopyRegInfo(low_reg, loc.reg.GetReg());
+ CopyRegInfo(high_reg, loc.reg.GetHighReg());
+ Clobber(loc.reg.GetReg());
+ Clobber(loc.reg.GetHighReg());
+ loc.reg = new_regs;
+ MarkPair(loc.reg.GetReg(), loc.reg.GetHighReg());
+ DCHECK(!IsFpReg(loc.reg.GetReg()) || ((loc.reg.GetReg() & 0x1) == 0));
}
return loc;
}
@@ -810,20 +807,18 @@ RegLocation Mir2Lir::EvalLocWide(RegLocation loc, int reg_class, bool update) {
DCHECK_NE(loc.s_reg_low, INVALID_SREG);
DCHECK_NE(GetSRegHi(loc.s_reg_low), INVALID_SREG);
- new_regs = AllocTypedTempPair(loc.fp, reg_class);
- loc.low_reg = new_regs & 0xff;
- loc.high_reg = (new_regs >> 8) & 0xff;
+ loc.reg = AllocTypedTempWide(loc.fp, reg_class);
- MarkPair(loc.low_reg, loc.high_reg);
+ MarkPair(loc.reg.GetReg(), loc.reg.GetHighReg());
if (update) {
loc.location = kLocPhysReg;
- MarkLive(loc.low_reg, loc.s_reg_low);
+ MarkLive(loc.reg.GetReg(), loc.s_reg_low);
// Does this wide value live in two registers or one vector register?
- if (loc.low_reg != loc.high_reg) {
- MarkLive(loc.high_reg, GetSRegHi(loc.s_reg_low));
+ if (loc.reg.GetReg() != loc.reg.GetHighReg()) {
+ MarkLive(loc.reg.GetHighReg(), GetSRegHi(loc.s_reg_low));
}
}
- DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
+ DCHECK(!IsFpReg(loc.reg.GetReg()) || ((loc.reg.GetReg() & 0x1) == 0));
return loc;
}
@@ -836,13 +831,13 @@ RegLocation Mir2Lir::EvalLoc(RegLocation loc, int reg_class, bool update) {
loc = UpdateLoc(loc);
if (loc.location == kLocPhysReg) {
- if (!RegClassMatches(reg_class, loc.low_reg)) {
+ if (!RegClassMatches(reg_class, loc.reg.GetReg())) {
/* Wrong register class. Realloc, copy and transfer ownership */
new_reg = AllocTypedTemp(loc.fp, reg_class);
- OpRegCopy(new_reg, loc.low_reg);
- CopyRegInfo(new_reg, loc.low_reg);
- Clobber(loc.low_reg);
- loc.low_reg = new_reg;
+ OpRegCopy(new_reg, loc.reg.GetReg());
+ CopyRegInfo(new_reg, loc.reg.GetReg());
+ Clobber(loc.reg.GetReg());
+ loc.reg = RegStorage(RegStorage::k32BitSolo, new_reg);
}
return loc;
}
@@ -850,11 +845,11 @@ RegLocation Mir2Lir::EvalLoc(RegLocation loc, int reg_class, bool update) {
DCHECK_NE(loc.s_reg_low, INVALID_SREG);
new_reg = AllocTypedTemp(loc.fp, reg_class);
- loc.low_reg = new_reg;
+ loc.reg = RegStorage(RegStorage::k32BitSolo, new_reg);
if (update) {
loc.location = kLocPhysReg;
- MarkLive(loc.low_reg, loc.s_reg_low);
+ MarkLive(loc.reg.GetReg(), loc.s_reg_low);
}
return loc;
}
@@ -1006,32 +1001,29 @@ void Mir2Lir::DoPromotion() {
if (curr->fp) {
if (promotion_map_[p_map_idx].fp_location == kLocPhysReg) {
curr->location = kLocPhysReg;
- curr->low_reg = promotion_map_[p_map_idx].FpReg;
+ curr->reg = RegStorage(RegStorage::k32BitSolo, promotion_map_[p_map_idx].FpReg);
curr->home = true;
}
} else {
if (promotion_map_[p_map_idx].core_location == kLocPhysReg) {
curr->location = kLocPhysReg;
- curr->low_reg = promotion_map_[p_map_idx].core_reg;
+ curr->reg = RegStorage(RegStorage::k32BitSolo, promotion_map_[p_map_idx].core_reg);
curr->home = true;
}
}
- curr->high_reg = INVALID_REG;
} else {
if (curr->high_word) {
continue;
}
if (curr->fp) {
if ((promotion_map_[p_map_idx].fp_location == kLocPhysReg) &&
- (promotion_map_[p_map_idx+1].fp_location ==
- kLocPhysReg)) {
+ (promotion_map_[p_map_idx+1].fp_location == kLocPhysReg)) {
int low_reg = promotion_map_[p_map_idx].FpReg;
int high_reg = promotion_map_[p_map_idx+1].FpReg;
// Doubles require pair of singles starting at even reg
if (((low_reg & 0x1) == 0) && ((low_reg + 1) == high_reg)) {
curr->location = kLocPhysReg;
- curr->low_reg = low_reg;
- curr->high_reg = high_reg;
+ curr->reg = RegStorage(RegStorage::k64BitPair, low_reg, high_reg);
curr->home = true;
}
}
@@ -1040,8 +1032,8 @@ void Mir2Lir::DoPromotion() {
&& (promotion_map_[p_map_idx+1].core_location ==
kLocPhysReg)) {
curr->location = kLocPhysReg;
- curr->low_reg = promotion_map_[p_map_idx].core_reg;
- curr->high_reg = promotion_map_[p_map_idx+1].core_reg;
+ curr->reg = RegStorage(RegStorage::k64BitPair, promotion_map_[p_map_idx].core_reg,
+ promotion_map_[p_map_idx+1].core_reg);
curr->home = true;
}
}
@@ -1068,13 +1060,13 @@ RegLocation Mir2Lir::GetReturnWide(bool is_double) {
RegLocation gpr_res = LocCReturnWide();
RegLocation fpr_res = LocCReturnDouble();
RegLocation res = is_double ? fpr_res : gpr_res;
- Clobber(res.low_reg);
- Clobber(res.high_reg);
- LockTemp(res.low_reg);
- LockTemp(res.high_reg);
+ Clobber(res.reg.GetReg());
+ Clobber(res.reg.GetHighReg());
+ LockTemp(res.reg.GetReg());
+ LockTemp(res.reg.GetHighReg());
// Does this wide value live in two registers or one vector register?
- if (res.low_reg != res.high_reg) {
- MarkPair(res.low_reg, res.high_reg);
+ if (res.reg.GetReg() != res.reg.GetHighReg()) {
+ MarkPair(res.reg.GetReg(), res.reg.GetHighReg());
}
return res;
}
@@ -1083,11 +1075,11 @@ RegLocation Mir2Lir::GetReturn(bool is_float) {
RegLocation gpr_res = LocCReturn();
RegLocation fpr_res = LocCReturnFloat();
RegLocation res = is_float ? fpr_res : gpr_res;
- Clobber(res.low_reg);
+ Clobber(res.reg.GetReg());
if (cu_->instruction_set == kMips) {
- MarkInUse(res.low_reg);
+ MarkInUse(res.reg.GetReg());
} else {
- LockTemp(res.low_reg);
+ LockTemp(res.reg.GetReg());
}
return res;
}
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 0613cdff7a..c92d2bb730 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -40,7 +40,7 @@ void X86Mir2Lir::GenSparseSwitch(MIR* mir, DexOffset table_offset,
int key = keys[i];
BasicBlock* case_block =
mir_graph_->FindBlock(current_dalvik_offset_ + targets[i]);
- OpCmpImmBranch(kCondEq, rl_src.low_reg, key,
+ OpCmpImmBranch(kCondEq, rl_src.reg.GetReg(), key,
&block_label_list_[case_block->id]);
}
}
@@ -87,7 +87,7 @@ void X86Mir2Lir::GenPackedSwitch(MIR* mir, DexOffset table_offset,
// We can use the saved value.
RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
rl_method = LoadValue(rl_method, kCoreReg);
- start_of_method_reg = rl_method.low_reg;
+ start_of_method_reg = rl_method.reg.GetReg();
store_method_addr_used_ = true;
} else {
start_of_method_reg = AllocTemp();
@@ -97,10 +97,10 @@ void X86Mir2Lir::GenPackedSwitch(MIR* mir, DexOffset table_offset,
int keyReg;
// Remove the bias, if necessary
if (low_key == 0) {
- keyReg = rl_src.low_reg;
+ keyReg = rl_src.reg.GetReg();
} else {
keyReg = AllocTemp();
- OpRegRegImm(kOpSub, keyReg, rl_src.low_reg, low_key);
+ OpRegRegImm(kOpSub, keyReg, rl_src.reg.GetReg(), low_key);
}
// Bounds check - if < 0 or >= size continue following switch
OpRegImm(kOpCmp, keyReg, size-1);
@@ -164,7 +164,7 @@ void X86Mir2Lir::GenFillArrayData(DexOffset table_offset, RegLocation rl_src) {
void X86Mir2Lir::GenMoveException(RegLocation rl_dest) {
int ex_offset = Thread::ExceptionOffset().Int32Value();
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- NewLIR2(kX86Mov32RT, rl_result.low_reg, ex_offset);
+ NewLIR2(kX86Mov32RT, rl_result.reg.GetReg(), ex_offset);
NewLIR2(kX86Mov32TI, ex_offset, 0);
StoreValue(rl_dest, rl_result);
}
@@ -198,7 +198,7 @@ void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
LockTemp(rX86_ARG2);
/* Build frame, return address already on stack */
- OpRegImm(kOpSub, rX86_SP, frame_size_ - 4);
+ stack_decrement_ = OpRegImm(kOpSub, rX86_SP, frame_size_ - 4);
/*
* We can safely skip the stack overflow check if we're
@@ -246,7 +246,7 @@ void X86Mir2Lir::GenExitSequence() {
NewLIR0(kPseudoMethodExit);
UnSpillCoreRegs();
/* Remove frame except for return address */
- OpRegImm(kOpAdd, rX86_SP, frame_size_ - 4);
+ stack_increment_ = OpRegImm(kOpAdd, rX86_SP, frame_size_ - 4);
NewLIR0(kX86Ret);
}
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 421d51e4fd..7cc2c08b96 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -48,8 +48,9 @@ class X86Mir2Lir : public Mir2Lir {
// Required for target - register utilities.
bool IsFpReg(int reg);
bool SameRegType(int reg1, int reg2);
+ // TODO: for consistency, make this return a RegStorage as well?
int AllocTypedTemp(bool fp_hint, int reg_class);
- int AllocTypedTempPair(bool fp_hint, int reg_class);
+ RegStorage AllocTypedTempWide(bool fp_hint, int reg_class);
int S2d(int low_reg, int high_reg);
int TargetReg(SpecialTargetRegister reg);
int GetArgMappingToPhysicalReg(int arg_num);
@@ -302,6 +303,18 @@ class X86Mir2Lir : public Mir2Lir {
*/
void InstallLiteralPools();
+ /*
+ * @brief Generate the debug_frame CFI information.
+   * @returns pointer to vector containing CFI information
+ */
+ static std::vector<uint8_t>* ReturnCommonCallFrameInformation();
+
+ /*
+ * @brief Generate the debug_frame FDE information.
+   * @returns pointer to vector containing CFI information
+ */
+ std::vector<uint8_t>* ReturnCallFrameInformation();
+
private:
void EmitPrefix(const X86EncodingMap* entry);
void EmitOpcode(const X86EncodingMap* entry);
@@ -549,6 +562,12 @@ class X86Mir2Lir : public Mir2Lir {
// Instructions needing patching with PC relative code addresses.
GrowableArray<LIR*> call_method_insns_;
+
+ // Prologue decrement of stack pointer.
+ LIR* stack_decrement_;
+
+ // Epilogue increment of stack pointer.
+ LIR* stack_increment_;
};
} // namespace art
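
The new stack_decrement_ and stack_increment_ members record the prologue and epilogue stack adjustments (captured in call_x86.cc above) so that ReturnCallFrameInformation can describe the frame to unwinders. A sketch of the kind of debug_frame byte stream such a routine builds, using the standard DWARF opcode values; the two arguments are assumptions standing in for values read off those recorded LIRs:

#include <cstdint>
#include <vector>

// Toy FDE instruction stream: advance past the prologue, then declare the
// new CFA offset. DW_CFA_advance_loc packs a small code delta into the low
// six bits of opcode 0x40; DW_CFA_def_cfa_offset (0x0e) takes a ULEB128.
std::vector<uint8_t>* BuildToyCallFrameInfo(uint32_t prologue_bytes,
                                            uint32_t frame_size) {
  std::vector<uint8_t>* cfi = new std::vector<uint8_t>();
  cfi->push_back(0x40 | (prologue_bytes & 0x3f));  // DW_CFA_advance_loc
  cfi->push_back(0x0e);                            // DW_CFA_def_cfa_offset
  uint32_t value = frame_size;
  do {  // Minimal ULEB128 encoder.
    uint8_t byte = value & 0x7f;
    value >>= 7;
    if (value != 0) {
      byte |= 0x80;
    }
    cfi->push_back(byte);
  } while (value != 0);
  return cfi;
}
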
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index 4c2ecc0efd..1827901b1b 100644
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -63,9 +63,9 @@ void X86Mir2Lir::GenArithOpFloat(Instruction::Code opcode,
rl_src1 = LoadValue(rl_src1, kFPReg);
rl_src2 = LoadValue(rl_src2, kFPReg);
rl_result = EvalLoc(rl_dest, kFPReg, true);
- int r_dest = rl_result.low_reg;
- int r_src1 = rl_src1.low_reg;
- int r_src2 = rl_src2.low_reg;
+ int r_dest = rl_result.reg.GetReg();
+ int r_src1 = rl_src1.reg.GetReg();
+ int r_src2 = rl_src2.reg.GetReg();
if (r_dest == r_src2) {
r_src2 = AllocTempFloat();
OpRegCopy(r_src2, r_dest);
@@ -118,9 +118,9 @@ void X86Mir2Lir::GenArithOpDouble(Instruction::Code opcode,
rl_result = EvalLoc(rl_dest, kFPReg, true);
DCHECK(rl_dest.wide);
DCHECK(rl_result.wide);
- int r_dest = S2d(rl_result.low_reg, rl_result.high_reg);
- int r_src1 = S2d(rl_src1.low_reg, rl_src1.high_reg);
- int r_src2 = S2d(rl_src2.low_reg, rl_src2.high_reg);
+ int r_dest = S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg());
+ int r_src1 = S2d(rl_src1.reg.GetReg(), rl_src1.reg.GetHighReg());
+ int r_src2 = S2d(rl_src2.reg.GetReg(), rl_src2.reg.GetHighReg());
if (r_dest == r_src2) {
r_src2 = AllocTempDouble() | X86_FP_DOUBLE;
OpRegCopy(r_src2, r_dest);
@@ -140,7 +140,7 @@ void X86Mir2Lir::GenLongToFP(RegLocation rl_dest, RegLocation rl_src, bool is_do
// If the source is in physical register, then put it in its location on stack.
if (rl_src.location == kLocPhysReg) {
- RegisterInfo* lo_info = GetRegInfo(rl_src.low_reg);
+ RegisterInfo* lo_info = GetRegInfo(rl_src.reg.GetReg());
if (lo_info != nullptr && lo_info->is_temp) {
// Calling FlushSpecificReg because it will only write back VR if it is dirty.
@@ -148,7 +148,7 @@ void X86Mir2Lir::GenLongToFP(RegLocation rl_dest, RegLocation rl_src, bool is_do
} else {
// It must have been register promoted if it is not a temp but is still in physical
// register. Since we need it to be in memory to convert, we place it there now.
- StoreBaseDispWide(TargetReg(kSp), src_v_reg_offset, rl_src.low_reg, rl_src.high_reg);
+ StoreBaseDispWide(TargetReg(kSp), src_v_reg_offset, rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
}
}
@@ -181,13 +181,13 @@ void X86Mir2Lir::GenLongToFP(RegLocation rl_dest, RegLocation rl_src, bool is_do
if (is_double) {
rl_result = EvalLocWide(rl_dest, kFPReg, true);
- LoadBaseDispWide(TargetReg(kSp), dest_v_reg_offset, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
+ LoadBaseDispWide(TargetReg(kSp), dest_v_reg_offset, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), INVALID_SREG);
StoreValueWide(rl_dest, rl_result);
} else {
rl_result = EvalLoc(rl_dest, kFPReg, true);
- LoadWordDisp(TargetReg(kSp), dest_v_reg_offset, rl_result.low_reg);
+ LoadWordDisp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg.GetReg());
StoreValue(rl_dest, rl_result);
}
@@ -219,21 +219,21 @@ void X86Mir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest,
break;
case Instruction::FLOAT_TO_INT: {
rl_src = LoadValue(rl_src, kFPReg);
- src_reg = rl_src.low_reg;
+ src_reg = rl_src.reg.GetReg();
// If the result vreg is also the src vreg, break the association to avoid a useless copy by EvalLoc()
ClobberSReg(rl_dest.s_reg_low);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
int temp_reg = AllocTempFloat();
- LoadConstant(rl_result.low_reg, 0x7fffffff);
- NewLIR2(kX86Cvtsi2ssRR, temp_reg, rl_result.low_reg);
+ LoadConstant(rl_result.reg.GetReg(), 0x7fffffff);
+ NewLIR2(kX86Cvtsi2ssRR, temp_reg, rl_result.reg.GetReg());
NewLIR2(kX86ComissRR, src_reg, temp_reg);
LIR* branch_pos_overflow = NewLIR2(kX86Jcc8, 0, kX86CondA);
LIR* branch_na_n = NewLIR2(kX86Jcc8, 0, kX86CondP);
- NewLIR2(kX86Cvttss2siRR, rl_result.low_reg, src_reg);
+ NewLIR2(kX86Cvttss2siRR, rl_result.reg.GetReg(), src_reg);
LIR* branch_normal = NewLIR1(kX86Jmp8, 0);
branch_na_n->target = NewLIR0(kPseudoTargetLabel);
- NewLIR2(kX86Xor32RR, rl_result.low_reg, rl_result.low_reg);
+ NewLIR2(kX86Xor32RR, rl_result.reg.GetReg(), rl_result.reg.GetReg());
branch_pos_overflow->target = NewLIR0(kPseudoTargetLabel);
branch_normal->target = NewLIR0(kPseudoTargetLabel);
StoreValue(rl_dest, rl_result);
@@ -241,21 +241,21 @@ void X86Mir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest,
}
case Instruction::DOUBLE_TO_INT: {
rl_src = LoadValueWide(rl_src, kFPReg);
- src_reg = rl_src.low_reg;
+ src_reg = rl_src.reg.GetReg();
// If the result vreg is also the src vreg, break the association to avoid a useless copy by EvalLoc()
ClobberSReg(rl_dest.s_reg_low);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
int temp_reg = AllocTempDouble() | X86_FP_DOUBLE;
- LoadConstant(rl_result.low_reg, 0x7fffffff);
- NewLIR2(kX86Cvtsi2sdRR, temp_reg, rl_result.low_reg);
+ LoadConstant(rl_result.reg.GetReg(), 0x7fffffff);
+ NewLIR2(kX86Cvtsi2sdRR, temp_reg, rl_result.reg.GetReg());
NewLIR2(kX86ComisdRR, src_reg, temp_reg);
LIR* branch_pos_overflow = NewLIR2(kX86Jcc8, 0, kX86CondA);
LIR* branch_na_n = NewLIR2(kX86Jcc8, 0, kX86CondP);
- NewLIR2(kX86Cvttsd2siRR, rl_result.low_reg, src_reg);
+ NewLIR2(kX86Cvttsd2siRR, rl_result.reg.GetReg(), src_reg);
LIR* branch_normal = NewLIR1(kX86Jmp8, 0);
branch_na_n->target = NewLIR0(kPseudoTargetLabel);
- NewLIR2(kX86Xor32RR, rl_result.low_reg, rl_result.low_reg);
+ NewLIR2(kX86Xor32RR, rl_result.reg.GetReg(), rl_result.reg.GetReg());
branch_pos_overflow->target = NewLIR0(kPseudoTargetLabel);
branch_normal->target = NewLIR0(kPseudoTargetLabel);
StoreValue(rl_dest, rl_result);
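Both conversion cases implement Java's saturating semantics by hand, since bare CVTTSS2SI/CVTTSD2SI return 0x80000000 for NaN and out-of-range inputs. A sketch of the intended behavior (assuming the usual Java conversion rules, not taken verbatim from this file):

#include <cstdint>

int32_t f2i(float f) {
  if (f != f) return 0;                       // NaN: the kX86CondP branch zeroes the result
  if (f >= 2147483648.0f) return INT32_MAX;   // positive overflow: the kX86CondA branch keeps 0x7fffffff
  if (f <= -2147483648.0f) return INT32_MIN;  // negative overflow: CVTTSS2SI happens to return it
  return static_cast<int32_t>(f);             // in range: plain truncation
}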
@@ -278,18 +278,18 @@ void X86Mir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest,
}
if (rl_src.wide) {
rl_src = LoadValueWide(rl_src, rcSrc);
- src_reg = S2d(rl_src.low_reg, rl_src.high_reg);
+ src_reg = S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
} else {
rl_src = LoadValue(rl_src, rcSrc);
- src_reg = rl_src.low_reg;
+ src_reg = rl_src.reg.GetReg();
}
if (rl_dest.wide) {
rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR2(op, S2d(rl_result.low_reg, rl_result.high_reg), src_reg);
+ NewLIR2(op, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), src_reg);
StoreValueWide(rl_dest, rl_result);
} else {
rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR2(op, rl_result.low_reg, src_reg);
+ NewLIR2(op, rl_result.reg.GetReg(), src_reg);
StoreValue(rl_dest, rl_result);
}
}
@@ -302,19 +302,19 @@ void X86Mir2Lir::GenCmpFP(Instruction::Code code, RegLocation rl_dest,
int src_reg2;
if (single) {
rl_src1 = LoadValue(rl_src1, kFPReg);
- src_reg1 = rl_src1.low_reg;
+ src_reg1 = rl_src1.reg.GetReg();
rl_src2 = LoadValue(rl_src2, kFPReg);
- src_reg2 = rl_src2.low_reg;
+ src_reg2 = rl_src2.reg.GetReg();
} else {
rl_src1 = LoadValueWide(rl_src1, kFPReg);
- src_reg1 = S2d(rl_src1.low_reg, rl_src1.high_reg);
+ src_reg1 = S2d(rl_src1.reg.GetReg(), rl_src1.reg.GetHighReg());
rl_src2 = LoadValueWide(rl_src2, kFPReg);
- src_reg2 = S2d(rl_src2.low_reg, rl_src2.high_reg);
+ src_reg2 = S2d(rl_src2.reg.GetReg(), rl_src2.reg.GetHighReg());
}
// If the result vreg is also the src vreg, break the association to avoid a useless copy by EvalLoc()
ClobberSReg(rl_dest.s_reg_low);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- LoadConstantNoClobber(rl_result.low_reg, unordered_gt ? 1 : 0);
+ LoadConstantNoClobber(rl_result.reg.GetReg(), unordered_gt ? 1 : 0);
if (single) {
NewLIR2(kX86UcomissRR, src_reg1, src_reg2);
} else {
@@ -325,20 +325,20 @@ void X86Mir2Lir::GenCmpFP(Instruction::Code code, RegLocation rl_dest,
branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
}
// If the result reg can't be byte accessed, use a jump and move instead of a set.
- if (rl_result.low_reg >= 4) {
+ if (rl_result.reg.GetReg() >= 4) {
LIR* branch2 = NULL;
if (unordered_gt) {
branch2 = NewLIR2(kX86Jcc8, 0, kX86CondA);
- NewLIR2(kX86Mov32RI, rl_result.low_reg, 0x0);
+ NewLIR2(kX86Mov32RI, rl_result.reg.GetReg(), 0x0);
} else {
branch2 = NewLIR2(kX86Jcc8, 0, kX86CondBe);
- NewLIR2(kX86Mov32RI, rl_result.low_reg, 0x1);
+ NewLIR2(kX86Mov32RI, rl_result.reg.GetReg(), 0x1);
}
branch2->target = NewLIR0(kPseudoTargetLabel);
} else {
- NewLIR2(kX86Set8R, rl_result.low_reg, kX86CondA /* above - unsigned > */);
+ NewLIR2(kX86Set8R, rl_result.reg.GetReg(), kX86CondA /* above - unsigned > */);
}
- NewLIR2(kX86Sbb32RI, rl_result.low_reg, 0);
+ NewLIR2(kX86Sbb32RI, rl_result.reg.GetReg(), 0);
if (unordered_gt) {
branch->target = NewLIR0(kPseudoTargetLabel);
}
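The SETA/SBB pairing above exploits the UCOMISS flag layout: CF=1 iff src1 < src2 or the operands are unordered, so subtracting CF from the 0/1 produced by SETA yields the three-way result directly. Equivalent semantics, as a sketch:

int cmp_fp(float a, float b, bool unordered_gt) {
  if (a != a || b != b) return unordered_gt ? 1 : -1;  // NaN: the bias is chosen by the opcode
  if (a > b) return 1;   // SETA -> 1, CF=0, SBB leaves 1
  if (a < b) return -1;  // SETA -> 0, CF=1, SBB gives -1
  return 0;              // equal: SETA -> 0, CF=0
}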
@@ -357,14 +357,14 @@ void X86Mir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias,
rl_src2 = mir_graph_->GetSrcWide(mir, 2);
rl_src1 = LoadValueWide(rl_src1, kFPReg);
rl_src2 = LoadValueWide(rl_src2, kFPReg);
- NewLIR2(kX86UcomisdRR, S2d(rl_src1.low_reg, rl_src1.high_reg),
- S2d(rl_src2.low_reg, rl_src2.high_reg));
+ NewLIR2(kX86UcomisdRR, S2d(rl_src1.reg.GetReg(), rl_src1.reg.GetHighReg()),
+ S2d(rl_src2.reg.GetReg(), rl_src2.reg.GetHighReg()));
} else {
rl_src1 = mir_graph_->GetSrc(mir, 0);
rl_src2 = mir_graph_->GetSrc(mir, 1);
rl_src1 = LoadValue(rl_src1, kFPReg);
rl_src2 = LoadValue(rl_src2, kFPReg);
- NewLIR2(kX86UcomissRR, rl_src1.low_reg, rl_src2.low_reg);
+ NewLIR2(kX86UcomissRR, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
}
ConditionCode ccode = mir->meta.ccode;
switch (ccode) {
@@ -418,7 +418,7 @@ void X86Mir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src) {
RegLocation rl_result;
rl_src = LoadValue(rl_src, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegImm(kOpAdd, rl_result.low_reg, rl_src.low_reg, 0x80000000);
+ OpRegRegImm(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), 0x80000000);
StoreValue(rl_dest, rl_result);
}
@@ -426,8 +426,8 @@ void X86Mir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) {
RegLocation rl_result;
rl_src = LoadValueWide(rl_src, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegImm(kOpAdd, rl_result.high_reg, rl_src.high_reg, 0x80000000);
- OpRegCopy(rl_result.low_reg, rl_src.low_reg);
+ OpRegRegImm(kOpAdd, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), 0x80000000);
+ OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetReg());
StoreValueWide(rl_dest, rl_result);
}
@@ -436,8 +436,8 @@ bool X86Mir2Lir::GenInlinedSqrt(CallInfo* info) {
RegLocation rl_dest = InlineTargetWide(info); // double place for result
rl_src = LoadValueWide(rl_src, kFPReg);
RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR2(kX86SqrtsdRR, S2d(rl_result.low_reg, rl_result.high_reg),
- S2d(rl_src.low_reg, rl_src.high_reg));
+ NewLIR2(kX86SqrtsdRR, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()),
+ S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg()));
StoreValueWide(rl_dest, rl_result);
return true;
}
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 5f04b7d152..d12c05799a 100644
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -190,6 +190,7 @@ void X86Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
RegLocation rl_dest = mir_graph_->GetDest(mir);
rl_src = LoadValue(rl_src, kCoreReg);
+ ConditionCode ccode = mir->meta.ccode;
// The kMirOpSelect has two variants, one for constants and one for moves.
const bool is_constant_case = (mir->ssa_rep->num_uses == 1);
@@ -200,6 +201,8 @@ void X86Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
rl_result = EvalLoc(rl_dest, kCoreReg, true);
/*
+ * For ccode == kCondEq:
+ *
* 1) When the true case is zero and result_reg is not the same as src_reg:
* xor result_reg, result_reg
* cmp $0, src_reg
@@ -212,34 +215,34 @@ void X86Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
* cmovz result_reg, t1
* 3) All other cases (we do compare first to set eflags):
* cmp $0, src_reg
- * mov result_reg, $true_case
- * mov t1, $false_case
- * cmovnz result_reg, t1
+ * mov result_reg, $false_case
+ * mov t1, $true_case
+ * cmovz result_reg, t1
*/
- const bool result_reg_same_as_src = (rl_src.location == kLocPhysReg && rl_src.low_reg == rl_result.low_reg);
+ const bool result_reg_same_as_src = (rl_src.location == kLocPhysReg && rl_src.reg.GetReg() == rl_result.reg.GetReg());
const bool true_zero_case = (true_val == 0 && false_val != 0 && !result_reg_same_as_src);
const bool false_zero_case = (false_val == 0 && true_val != 0 && !result_reg_same_as_src);
const bool catch_all_case = !(true_zero_case || false_zero_case);
if (true_zero_case || false_zero_case) {
- OpRegReg(kOpXor, rl_result.low_reg, rl_result.low_reg);
+ OpRegReg(kOpXor, rl_result.reg.GetReg(), rl_result.reg.GetReg());
}
if (true_zero_case || false_zero_case || catch_all_case) {
- OpRegImm(kOpCmp, rl_src.low_reg, 0);
+ OpRegImm(kOpCmp, rl_src.reg.GetReg(), 0);
}
if (catch_all_case) {
- OpRegImm(kOpMov, rl_result.low_reg, true_val);
+ OpRegImm(kOpMov, rl_result.reg.GetReg(), false_val);
}
if (true_zero_case || false_zero_case || catch_all_case) {
- int immediateForTemp = false_zero_case ? true_val : false_val;
+ ConditionCode cc = true_zero_case ? NegateComparison(ccode) : ccode;
+ int immediateForTemp = true_zero_case ? false_val : true_val;
int temp1_reg = AllocTemp();
OpRegImm(kOpMov, temp1_reg, immediateForTemp);
- ConditionCode cc = false_zero_case ? kCondEq : kCondNe;
- OpCondRegReg(kOpCmov, cc, rl_result.low_reg, temp1_reg);
+ OpCondRegReg(kOpCmov, cc, rl_result.reg.GetReg(), temp1_reg);
FreeTemp(temp1_reg);
}
@@ -251,6 +254,8 @@ void X86Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
rl_result = EvalLoc(rl_dest, kCoreReg, true);
/*
+ * For ccode == kCondEq:
+ *
* 1) When the true case is already in place:
* cmp $0, src_reg
* cmovnz result_reg, false_reg
@@ -259,20 +264,20 @@ void X86Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
* cmovz result_reg, true_reg
* 3) When neither case is in place:
* cmp $0, src_reg
- * mov result_reg, true_reg
- * cmovnz result_reg, false_reg
+ * mov result_reg, false_reg
+ * cmovz result_reg, true_reg
*/
// kMirOpSelect is generated only for conditional cases where the comparison is against zero.
- OpRegImm(kOpCmp, rl_src.low_reg, 0);
+ OpRegImm(kOpCmp, rl_src.reg.GetReg(), 0);
- if (rl_result.low_reg == rl_true.low_reg) {
- OpCondRegReg(kOpCmov, kCondNe, rl_result.low_reg, rl_false.low_reg);
- } else if (rl_result.low_reg == rl_false.low_reg) {
- OpCondRegReg(kOpCmov, kCondEq, rl_result.low_reg, rl_true.low_reg);
+ if (rl_result.reg.GetReg() == rl_true.reg.GetReg()) {
+ OpCondRegReg(kOpCmov, NegateComparison(ccode), rl_result.reg.GetReg(), rl_false.reg.GetReg());
+ } else if (rl_result.reg.GetReg() == rl_false.reg.GetReg()) {
+ OpCondRegReg(kOpCmov, ccode, rl_result.reg.GetReg(), rl_true.reg.GetReg());
} else {
- OpRegCopy(rl_result.low_reg, rl_true.low_reg);
- OpCondRegReg(kOpCmov, kCondNe, rl_result.low_reg, rl_false.low_reg);
+ OpRegCopy(rl_result.reg.GetReg(), rl_false.reg.GetReg());
+ OpCondRegReg(kOpCmov, ccode, rl_result.reg.GetReg(), rl_true.reg.GetReg());
}
}
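The rewritten select honors mir->meta.ccode instead of hard-coding kCondEq/kCondNe: the false value is moved in first (MOV does not touch the EFLAGS set by the CMP), then a CMOV on the true condition overwrites it. For ccode == kCondEq the catch-all case reduces to the following (hedged sketch of the semantics, not the emitted code):

int select_eq(int src, int true_val, int false_val) {
  int result = false_val;     // mov result_reg, $false_case (flags preserved)
  int t1 = true_val;          // mov t1, $true_case
  if (src == 0) result = t1;  // cmovz result_reg, t1
  return result;
}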
@@ -337,8 +342,8 @@ void X86Mir2Lir::GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1,
LIR* taken = &block_label_list_[bb->taken];
LIR* not_taken = &block_label_list_[bb->fall_through];
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
- int32_t low_reg = rl_src1.low_reg;
- int32_t high_reg = rl_src1.high_reg;
+ int32_t low_reg = rl_src1.reg.GetReg();
+ int32_t high_reg = rl_src1.reg.GetHighReg();
if (val == 0 && (ccode == kCondEq || ccode == kCondNe)) {
int t_reg = AllocTemp();
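For an EQ/NE compare of a long against zero, the temp allocated here is presumably used to OR the two halves together, so a single ZF test replaces a word-by-word compare chain; roughly:

#include <cstdint>

bool long_is_zero(uint32_t lo, uint32_t hi) {
  return (lo | hi) == 0;  // or t_reg, low_reg, high_reg; then jz/jnz on ZF
}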
@@ -461,7 +466,7 @@ RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src,
// Assume that the result will be in EDX.
RegLocation rl_result = {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
- r2, INVALID_REG, INVALID_SREG, INVALID_SREG};
+ RegStorage(RegStorage::k32BitSolo, r2), INVALID_SREG, INVALID_SREG};
// handle div/rem by 1 special case.
if (imm == 1) {
@@ -472,7 +477,7 @@ RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src,
// x % 1 == 0.
LoadConstantNoClobber(r0, 0);
// For this case, return the result in EAX.
- rl_result.low_reg = r0;
+ rl_result.reg.SetReg(r0);
}
} else if (imm == -1) { // handle 0x80000000 / -1 special case.
if (is_div) {
@@ -494,7 +499,7 @@ RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src,
LoadConstantNoClobber(r0, 0);
}
// For this case, return the result in EAX.
- rl_result.low_reg = r0;
+ rl_result.reg.SetReg(r0);
} else {
CHECK(imm <= -2 || imm >= 2);
// Use H.S.Warren's Hacker's Delight Chapter 10 and
@@ -524,8 +529,8 @@ RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src,
// We will need the value later.
if (rl_src.location == kLocPhysReg) {
// We can use it directly.
- DCHECK(rl_src.low_reg != r0 && rl_src.low_reg != r2);
- numerator_reg = rl_src.low_reg;
+ DCHECK(rl_src.reg.GetReg() != r0 && rl_src.reg.GetReg() != r2);
+ numerator_reg = rl_src.reg.GetReg();
} else {
LoadValueDirectFixed(rl_src, r1);
numerator_reg = r1;
@@ -582,7 +587,7 @@ RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src,
NewLIR2(kX86Sub32RR, r0, r2);
// For this case, return the result in EAX.
- rl_result.low_reg = r0;
+ rl_result.reg.SetReg(r0);
}
}
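The constant-divisor path follows Hacker's Delight: multiply by a precomputed 32-bit magic number, keep the high half of the product (EDX), shift, and correct for negative numerators. A sketch of the runtime shape for a positive divisor, with magic/shift assumed to come from the table-building step not shown here:

#include <cstdint>

int32_t div_by_const(int32_t n, int32_t magic, int shift) {
  int32_t q = static_cast<int32_t>((static_cast<int64_t>(magic) * n) >> 32);  // imul; take EDX
  if (magic < 0) q += n;  // e.g. divisor 7 uses magic 0x92492493, which is negative as an int32
  q >>= shift;            // sar edx, shift
  q -= (n >> 31);         // adds 1 when n < 0 (n >> 31 is -1 then)
  return q;
}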
@@ -638,9 +643,9 @@ RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
// Result is in EAX for div and EDX for rem.
RegLocation rl_result = {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
- r0, INVALID_REG, INVALID_SREG, INVALID_SREG};
+ RegStorage(RegStorage::k32BitSolo, r0), INVALID_SREG, INVALID_SREG};
if (!is_div) {
- rl_result.low_reg = r2;
+ rl_result.reg.SetReg(r2);
}
return rl_result;
}
@@ -662,22 +667,22 @@ bool X86Mir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min) {
* The reason is that the first copy would inadvertently clobber the second element with
* the first one, yielding the wrong result, so we swap the operands in that case.
*/
- if (rl_result.low_reg == rl_src2.low_reg) {
+ if (rl_result.reg.GetReg() == rl_src2.reg.GetReg()) {
std::swap(rl_src1, rl_src2);
}
// Pick the first integer as min/max.
- OpRegCopy(rl_result.low_reg, rl_src1.low_reg);
+ OpRegCopy(rl_result.reg.GetReg(), rl_src1.reg.GetReg());
// If the integers are both in the same register, then there is nothing else to do
// because they are equal and we have already moved one into the result.
- if (rl_src1.low_reg != rl_src2.low_reg) {
+ if (rl_src1.reg.GetReg() != rl_src2.reg.GetReg()) {
// It is possible we didn't pick correctly so do the actual comparison now.
- OpRegReg(kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
+ OpRegReg(kOpCmp, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
// Conditionally move the other integer into the destination register.
ConditionCode condition_code = is_min ? kCondGt : kCondLt;
- OpCondRegReg(kOpCmov, condition_code, rl_result.low_reg, rl_src2.low_reg);
+ OpCondRegReg(kOpCmov, condition_code, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
}
StoreValue(rl_dest, rl_result);
@@ -692,12 +697,12 @@ bool X86Mir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (size == kLong) {
// Unaligned access is allowed on x86.
- LoadBaseDispWide(rl_address.low_reg, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
+ LoadBaseDispWide(rl_address.reg.GetReg(), 0, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), INVALID_SREG);
StoreValueWide(rl_dest, rl_result);
} else {
DCHECK(size == kSignedByte || size == kSignedHalf || size == kWord);
// Unaligned access is allowed on x86.
- LoadBaseDisp(rl_address.low_reg, 0, rl_result.low_reg, size, INVALID_SREG);
+ LoadBaseDisp(rl_address.reg.GetReg(), 0, rl_result.reg.GetReg(), size, INVALID_SREG);
StoreValue(rl_dest, rl_result);
}
return true;
@@ -711,12 +716,12 @@ bool X86Mir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
if (size == kLong) {
// Unaligned access is allowed on x86.
RegLocation rl_value = LoadValueWide(rl_src_value, kCoreReg);
- StoreBaseDispWide(rl_address.low_reg, 0, rl_value.low_reg, rl_value.high_reg);
+ StoreBaseDispWide(rl_address.reg.GetReg(), 0, rl_value.reg.GetReg(), rl_value.reg.GetHighReg());
} else {
DCHECK(size == kSignedByte || size == kSignedHalf || size == kWord);
// Unaligned access is allowed on x86.
RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
- StoreBaseDisp(rl_address.low_reg, 0, rl_value.low_reg, size);
+ StoreBaseDisp(rl_address.reg.GetReg(), 0, rl_value.reg.GetReg(), size);
}
return true;
}
@@ -776,13 +781,13 @@ bool X86Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
if (is_object && !mir_graph_->IsConstantNullRef(rl_new_value)) {
// Mark card for object assuming new value is stored.
FreeTemp(r0); // Temporarily release EAX for MarkGCCard().
- MarkGCCard(rl_new_value.low_reg, rl_object.low_reg);
+ MarkGCCard(rl_new_value.reg.GetReg(), rl_object.reg.GetReg());
LockTemp(r0);
}
RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
LoadValueDirect(rl_src_expected, r0);
- NewLIR5(kX86LockCmpxchgAR, rl_object.low_reg, rl_offset.low_reg, 0, 0, rl_new_value.low_reg);
+ NewLIR5(kX86LockCmpxchgAR, rl_object.reg.GetReg(), rl_offset.reg.GetReg(), 0, 0, rl_new_value.reg.GetReg());
FreeTemp(r0);
}
@@ -790,8 +795,8 @@ bool X86Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
// Convert ZF to boolean
RegLocation rl_dest = InlineTarget(info); // boolean place for result
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- NewLIR2(kX86Set8R, rl_result.low_reg, kX86CondZ);
- NewLIR2(kX86Movzx8RR, rl_result.low_reg, rl_result.low_reg);
+ NewLIR2(kX86Set8R, rl_result.reg.GetReg(), kX86CondZ);
+ NewLIR2(kX86Movzx8RR, rl_result.reg.GetReg(), rl_result.reg.GetReg());
StoreValue(rl_dest, rl_result);
return true;
}
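The inlined CAS relies on LOCK CMPXCHG's contract: EAX holds the expected value, the exchange happens only on a match, and ZF records the outcome, which SETZ/MOVZX then widen into the boolean result. Semantically (a sketch, not the generated code):

#include <atomic>
#include <cstdint>

bool cas32(std::atomic<int32_t>* addr, int32_t expected, int32_t new_value) {
  // lock cmpxchg [addr], new_value with EAX = expected;
  // setz al; movzx eax, al produces the boolean return value.
  return addr->compare_exchange_strong(expected, new_value);
}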
@@ -830,11 +835,11 @@ void X86Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
RegLocation rl_result, int lit,
int first_bit, int second_bit) {
int t_reg = AllocTemp();
- OpRegRegImm(kOpLsl, t_reg, rl_src.low_reg, second_bit - first_bit);
- OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, t_reg);
+ OpRegRegImm(kOpLsl, t_reg, rl_src.reg.GetReg(), second_bit - first_bit);
+ OpRegRegReg(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), t_reg);
FreeTemp(t_reg);
if (first_bit != 0) {
- OpRegRegImm(kOpLsl, rl_result.low_reg, rl_result.low_reg, first_bit);
+ OpRegRegImm(kOpLsl, rl_result.reg.GetReg(), rl_result.reg.GetReg(), first_bit);
}
}
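This helper strength-reduces multiplication by a literal with exactly two set bits: lit == (1 << first_bit) + (1 << second_bit), so the product folds into a shift, an add, and an optional final shift. As arithmetic (sketch):

#include <cstdint>

int32_t mul_two_bit_lit(int32_t x, int first_bit, int second_bit) {
  int32_t t = x << (second_bit - first_bit);       // OpRegRegImm(kOpLsl, t_reg, ...)
  int32_t r = x + t;                               // OpRegRegReg(kOpAdd, ...)
  return (first_bit != 0) ? (r << first_bit) : r;  // final OpRegRegImm(kOpLsl, ...)
}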
@@ -918,12 +923,11 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
int64_t val = mir_graph_->ConstantValueWide(rl_src2);
if (val == 0) {
RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
- OpRegReg(kOpXor, rl_result.low_reg, rl_result.low_reg);
- OpRegReg(kOpXor, rl_result.high_reg, rl_result.high_reg);
+ OpRegReg(kOpXor, rl_result.reg.GetReg(), rl_result.reg.GetReg());
+ OpRegReg(kOpXor, rl_result.reg.GetHighReg(), rl_result.reg.GetHighReg());
StoreValueWide(rl_dest, rl_result);
return;
} else if (val == 1) {
- rl_src1 = EvalLocWide(rl_src1, kCoreReg, true);
StoreValueWide(rl_dest, rl_src1);
return;
} else if (val == 2) {
@@ -952,8 +956,8 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
// ECX <- 1H * 2L
// EAX <- 1L * 2H
if (src1_in_reg) {
- GenImulRegImm(r1, rl_src1.high_reg, val_lo);
- GenImulRegImm(r0, rl_src1.low_reg, val_hi);
+ GenImulRegImm(r1, rl_src1.reg.GetHighReg(), val_lo);
+ GenImulRegImm(r0, rl_src1.reg.GetReg(), val_hi);
} else {
GenImulMemImm(r1, GetSRegHi(rl_src1.s_reg_low), displacement + HIWORD_OFFSET, val_lo);
GenImulMemImm(r0, rl_src1.s_reg_low, displacement + LOWORD_OFFSET, val_hi);
@@ -967,7 +971,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
// EDX:EAX <- 2L * 1L (double precision)
if (src1_in_reg) {
- NewLIR1(kX86Mul32DaR, rl_src1.low_reg);
+ NewLIR1(kX86Mul32DaR, rl_src1.reg.GetReg());
} else {
LIR *m = NewLIR2(kX86Mul32DaM, rX86_SP, displacement + LOWORD_OFFSET);
AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2,
@@ -978,7 +982,8 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
NewLIR2(kX86Add32RR, r2, r1);
// Result is EDX:EAX
- RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, r0, r2,
+ RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
+ RegStorage(RegStorage::k64BitPair, r0, r2),
INVALID_SREG, INVALID_SREG};
StoreValueWide(rl_dest, rl_result);
return;
@@ -1000,7 +1005,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
// ECX <- 1H
if (src1_in_reg) {
- NewLIR2(kX86Mov32RR, r1, rl_src1.high_reg);
+ NewLIR2(kX86Mov32RR, r1, rl_src1.reg.GetHighReg());
} else {
LoadBaseDisp(rX86_SP, SRegOffset(rl_src1.s_reg_low) + HIWORD_OFFSET, r1,
kWord, GetSRegHi(rl_src1.s_reg_low));
@@ -1010,7 +1015,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
// Take advantage of the fact that the values are the same.
// ECX <- ECX * 2L (1H * 2L)
if (src2_in_reg) {
- NewLIR2(kX86Imul32RR, r1, rl_src2.low_reg);
+ NewLIR2(kX86Imul32RR, r1, rl_src2.reg.GetReg());
} else {
int displacement = SRegOffset(rl_src2.s_reg_low);
LIR *m = NewLIR3(kX86Imul32RM, r1, rX86_SP, displacement + LOWORD_OFFSET);
@@ -1023,7 +1028,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
} else {
// EAX <- 2H
if (src2_in_reg) {
- NewLIR2(kX86Mov32RR, r0, rl_src2.high_reg);
+ NewLIR2(kX86Mov32RR, r0, rl_src2.reg.GetHighReg());
} else {
LoadBaseDisp(rX86_SP, SRegOffset(rl_src2.s_reg_low) + HIWORD_OFFSET, r0,
kWord, GetSRegHi(rl_src2.s_reg_low));
@@ -1031,7 +1036,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
// EAX <- EAX * 1L (2H * 1L)
if (src1_in_reg) {
- NewLIR2(kX86Imul32RR, r0, rl_src1.low_reg);
+ NewLIR2(kX86Imul32RR, r0, rl_src1.reg.GetReg());
} else {
int displacement = SRegOffset(rl_src1.s_reg_low);
LIR *m = NewLIR3(kX86Imul32RM, r0, rX86_SP, displacement + LOWORD_OFFSET);
@@ -1041,7 +1046,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
// ECX <- ECX * 2L (1H * 2L)
if (src2_in_reg) {
- NewLIR2(kX86Imul32RR, r1, rl_src2.low_reg);
+ NewLIR2(kX86Imul32RR, r1, rl_src2.reg.GetReg());
} else {
int displacement = SRegOffset(rl_src2.s_reg_low);
LIR *m = NewLIR3(kX86Imul32RM, r1, rX86_SP, displacement + LOWORD_OFFSET);
@@ -1055,7 +1060,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
// EAX <- 2L
if (src2_in_reg) {
- NewLIR2(kX86Mov32RR, r0, rl_src2.low_reg);
+ NewLIR2(kX86Mov32RR, r0, rl_src2.reg.GetReg());
} else {
LoadBaseDisp(rX86_SP, SRegOffset(rl_src2.s_reg_low) + LOWORD_OFFSET, r0,
kWord, rl_src2.s_reg_low);
@@ -1063,7 +1068,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
// EDX:EAX <- 2L * 1L (double precision)
if (src1_in_reg) {
- NewLIR1(kX86Mul32DaR, rl_src1.low_reg);
+ NewLIR1(kX86Mul32DaR, rl_src1.reg.GetReg());
} else {
int displacement = SRegOffset(rl_src1.s_reg_low);
LIR *m = NewLIR2(kX86Mul32DaM, rX86_SP, displacement + LOWORD_OFFSET);
@@ -1075,8 +1080,8 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
NewLIR2(kX86Add32RR, r2, r1);
// Result is EDX:EAX
- RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, r0, r2,
- INVALID_SREG, INVALID_SREG};
+ RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
+ RegStorage(RegStorage::k64BitPair, r0, r2), INVALID_SREG, INVALID_SREG};
StoreValueWide(rl_dest, rl_result);
}
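GenMulLong computes only the low 64 bits of the 128-bit product, so the schoolbook decomposition needs one widening MUL for lo1*lo2 plus two 32-bit IMULs for the cross terms, which are folded into EDX. In C terms:

#include <cstdint>

uint64_t mul64(uint64_t a, uint64_t b) {
  uint32_t a_lo = static_cast<uint32_t>(a), a_hi = static_cast<uint32_t>(a >> 32);
  uint32_t b_lo = static_cast<uint32_t>(b), b_hi = static_cast<uint32_t>(b >> 32);
  uint64_t lo_prod = static_cast<uint64_t>(a_lo) * b_lo;  // mul: EDX:EAX
  uint32_t cross = a_lo * b_hi + a_hi * b_lo;             // the two imuls plus add above
  return lo_prod + (static_cast<uint64_t>(cross) << 32);  // "add r2, r1" folds into EDX
}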
@@ -1086,18 +1091,18 @@ void X86Mir2Lir::GenLongRegOrMemOp(RegLocation rl_dest, RegLocation rl_src,
X86OpCode x86op = GetOpcode(op, rl_dest, rl_src, false);
if (rl_src.location == kLocPhysReg) {
// Both operands are in registers.
- if (rl_dest.low_reg == rl_src.high_reg) {
+ if (rl_dest.reg.GetReg() == rl_src.reg.GetHighReg()) {
// The registers are the same, so we would clobber it before the use.
int temp_reg = AllocTemp();
- OpRegCopy(temp_reg, rl_dest.low_reg);
- rl_src.high_reg = temp_reg;
+ OpRegCopy(temp_reg, rl_dest.reg.GetReg());
+ rl_src.reg.SetHighReg(temp_reg);
}
- NewLIR2(x86op, rl_dest.low_reg, rl_src.low_reg);
+ NewLIR2(x86op, rl_dest.reg.GetReg(), rl_src.reg.GetReg());
x86op = GetOpcode(op, rl_dest, rl_src, true);
- NewLIR2(x86op, rl_dest.high_reg, rl_src.high_reg);
- FreeTemp(rl_src.low_reg);
- FreeTemp(rl_src.high_reg);
+ NewLIR2(x86op, rl_dest.reg.GetHighReg(), rl_src.reg.GetHighReg());
+ FreeTemp(rl_src.reg.GetReg());
+ FreeTemp(rl_src.reg.GetHighReg());
return;
}
@@ -1107,11 +1112,11 @@ void X86Mir2Lir::GenLongRegOrMemOp(RegLocation rl_dest, RegLocation rl_src,
int rBase = TargetReg(kSp);
int displacement = SRegOffset(rl_src.s_reg_low);
- LIR *lir = NewLIR3(x86op, rl_dest.low_reg, rBase, displacement + LOWORD_OFFSET);
+ LIR *lir = NewLIR3(x86op, rl_dest.reg.GetReg(), rBase, displacement + LOWORD_OFFSET);
AnnotateDalvikRegAccess(lir, (displacement + LOWORD_OFFSET) >> 2,
true /* is_load */, true /* is64bit */);
x86op = GetOpcode(op, rl_dest, rl_src, true);
- lir = NewLIR3(x86op, rl_dest.high_reg, rBase, displacement + HIWORD_OFFSET);
+ lir = NewLIR3(x86op, rl_dest.reg.GetHighReg(), rBase, displacement + HIWORD_OFFSET);
AnnotateDalvikRegAccess(lir, (displacement + HIWORD_OFFSET) >> 2,
true /* is_load */, true /* is64bit */);
}
@@ -1138,15 +1143,15 @@ void X86Mir2Lir::GenLongArith(RegLocation rl_dest, RegLocation rl_src, Instructi
int rBase = TargetReg(kSp);
int displacement = SRegOffset(rl_dest.s_reg_low);
- LIR *lir = NewLIR3(x86op, rBase, displacement + LOWORD_OFFSET, rl_src.low_reg);
+ LIR *lir = NewLIR3(x86op, rBase, displacement + LOWORD_OFFSET, rl_src.reg.GetReg());
AnnotateDalvikRegAccess(lir, (displacement + LOWORD_OFFSET) >> 2,
false /* is_load */, true /* is64bit */);
x86op = GetOpcode(op, rl_dest, rl_src, true);
- lir = NewLIR3(x86op, rBase, displacement + HIWORD_OFFSET, rl_src.high_reg);
+ lir = NewLIR3(x86op, rBase, displacement + HIWORD_OFFSET, rl_src.reg.GetHighReg());
AnnotateDalvikRegAccess(lir, (displacement + HIWORD_OFFSET) >> 2,
false /* is_load */, true /* is64bit */);
- FreeTemp(rl_src.low_reg);
- FreeTemp(rl_src.high_reg);
+ FreeTemp(rl_src.reg.GetReg());
+ FreeTemp(rl_src.reg.GetHighReg());
}
void X86Mir2Lir::GenLongArith(RegLocation rl_dest, RegLocation rl_src1,
@@ -1188,12 +1193,12 @@ void X86Mir2Lir::GenLongArith(RegLocation rl_dest, RegLocation rl_src1,
// Get one of the source operands into temporary register.
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
- if (IsTemp(rl_src1.low_reg) && IsTemp(rl_src1.high_reg)) {
+ if (IsTemp(rl_src1.reg.GetReg()) && IsTemp(rl_src1.reg.GetHighReg())) {
GenLongRegOrMemOp(rl_src1, rl_src2, op);
} else if (is_commutative) {
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
// We need at least one of them to be a temporary.
- if (!(IsTemp(rl_src2.low_reg) && IsTemp(rl_src2.high_reg))) {
+ if (!(IsTemp(rl_src2.reg.GetReg()) && IsTemp(rl_src2.reg.GetHighReg()))) {
rl_src1 = ForceTempWide(rl_src1);
}
GenLongRegOrMemOp(rl_src1, rl_src2, op);
@@ -1234,15 +1239,16 @@ void X86Mir2Lir::GenXorLong(Instruction::Code opcode, RegLocation rl_dest,
void X86Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
rl_src = LoadValueWide(rl_src, kCoreReg);
RegLocation rl_result = ForceTempWide(rl_src);
- if (rl_dest.low_reg == rl_src.high_reg) {
+ if (((rl_dest.location == kLocPhysReg) && (rl_src.location == kLocPhysReg)) &&
+ ((rl_dest.reg.GetReg() == rl_src.reg.GetHighReg()))) {
// The registers are the same, so we would clobber it before the use.
int temp_reg = AllocTemp();
- OpRegCopy(temp_reg, rl_result.low_reg);
- rl_result.high_reg = temp_reg;
+ OpRegCopy(temp_reg, rl_result.reg.GetReg());
+ rl_result.reg.SetHighReg(temp_reg);
}
- OpRegReg(kOpNeg, rl_result.low_reg, rl_result.low_reg); // rLow = -rLow
- OpRegImm(kOpAdc, rl_result.high_reg, 0); // rHigh = rHigh + CF
- OpRegReg(kOpNeg, rl_result.high_reg, rl_result.high_reg); // rHigh = -rHigh
+ OpRegReg(kOpNeg, rl_result.reg.GetReg(), rl_result.reg.GetReg()); // rLow = -rLow
+ OpRegImm(kOpAdc, rl_result.reg.GetHighReg(), 0); // rHigh = rHigh + CF
+ OpRegReg(kOpNeg, rl_result.reg.GetHighReg(), rl_result.reg.GetHighReg()); // rHigh = -rHigh
StoreValueWide(rl_dest, rl_result);
}
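The NEG/ADC/NEG sequence works because x86 NEG sets CF exactly when its operand was nonzero: lo' = -lo and hi' = -(hi + (lo != 0)). Sketch:

#include <cstdint>

uint64_t neg64(uint32_t lo, uint32_t hi) {
  uint32_t carry = (lo != 0) ? 1u : 0u;  // CF left by "neg" on the low word
  uint32_t new_lo = 0u - lo;             // OpRegReg(kOpNeg, low, low)
  uint32_t new_hi = 0u - (hi + carry);   // kOpAdc with 0, then kOpNeg on high
  return (static_cast<uint64_t>(new_hi) << 32) | new_lo;
}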
@@ -1284,29 +1290,29 @@ void X86Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
// If index is constant, just fold it into the data offset
data_offset += constant_index_value << scale;
// treat as non-array below
- rl_index.low_reg = INVALID_REG;
+ rl_index.reg = RegStorage(RegStorage::k32BitSolo, INVALID_REG);
}
/* null object? */
- GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);
+ GenNullCheck(rl_array.s_reg_low, rl_array.reg.GetReg(), opt_flags);
if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
if (constant_index) {
- GenMemImmedCheck(kCondLs, rl_array.low_reg, len_offset,
+ GenMemImmedCheck(kCondLs, rl_array.reg.GetReg(), len_offset,
constant_index_value, kThrowConstantArrayBounds);
} else {
- GenRegMemCheck(kCondUge, rl_index.low_reg, rl_array.low_reg,
+ GenRegMemCheck(kCondUge, rl_index.reg.GetReg(), rl_array.reg.GetReg(),
len_offset, kThrowArrayBounds);
}
}
rl_result = EvalLoc(rl_dest, reg_class, true);
if ((size == kLong) || (size == kDouble)) {
- LoadBaseIndexedDisp(rl_array.low_reg, rl_index.low_reg, scale, data_offset, rl_result.low_reg,
- rl_result.high_reg, size, INVALID_SREG);
+ LoadBaseIndexedDisp(rl_array.reg.GetReg(), rl_index.reg.GetReg(), scale, data_offset, rl_result.reg.GetReg(),
+ rl_result.reg.GetHighReg(), size, INVALID_SREG);
StoreValueWide(rl_dest, rl_result);
} else {
- LoadBaseIndexedDisp(rl_array.low_reg, rl_index.low_reg, scale,
- data_offset, rl_result.low_reg, INVALID_REG, size,
+ LoadBaseIndexedDisp(rl_array.reg.GetReg(), rl_index.reg.GetReg(), scale,
+ data_offset, rl_result.reg.GetReg(), INVALID_REG, size,
INVALID_SREG);
StoreValue(rl_dest, rl_result);
}
@@ -1338,18 +1344,18 @@ void X86Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
constant_index_value = mir_graph_->ConstantValue(rl_index);
data_offset += constant_index_value << scale;
// treat as non-array below
- rl_index.low_reg = INVALID_REG;
+ rl_index.reg = RegStorage(RegStorage::k32BitSolo, INVALID_REG);
}
/* null object? */
- GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);
+ GenNullCheck(rl_array.s_reg_low, rl_array.reg.GetReg(), opt_flags);
if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
if (constant_index) {
- GenMemImmedCheck(kCondLs, rl_array.low_reg, len_offset,
+ GenMemImmedCheck(kCondLs, rl_array.reg.GetReg(), len_offset,
constant_index_value, kThrowConstantArrayBounds);
} else {
- GenRegMemCheck(kCondUge, rl_index.low_reg, rl_array.low_reg,
+ GenRegMemCheck(kCondUge, rl_index.reg.GetReg(), rl_array.reg.GetReg(),
len_offset, kThrowArrayBounds);
}
}
@@ -1359,21 +1365,21 @@ void X86Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
rl_src = LoadValue(rl_src, reg_class);
}
// If the src reg can't be byte accessed, move it to a temp first.
- if ((size == kSignedByte || size == kUnsignedByte) && rl_src.low_reg >= 4) {
+ if ((size == kSignedByte || size == kUnsignedByte) && rl_src.reg.GetReg() >= 4) {
int temp = AllocTemp();
- OpRegCopy(temp, rl_src.low_reg);
- StoreBaseIndexedDisp(rl_array.low_reg, rl_index.low_reg, scale, data_offset, temp,
+ OpRegCopy(temp, rl_src.reg.GetReg());
+ StoreBaseIndexedDisp(rl_array.reg.GetReg(), rl_index.reg.GetReg(), scale, data_offset, temp,
INVALID_REG, size, INVALID_SREG);
} else {
- StoreBaseIndexedDisp(rl_array.low_reg, rl_index.low_reg, scale, data_offset, rl_src.low_reg,
- rl_src.high_reg, size, INVALID_SREG);
+ StoreBaseIndexedDisp(rl_array.reg.GetReg(), rl_index.reg.GetReg(), scale, data_offset, rl_src.reg.GetReg(),
+ rl_src.wide ? rl_src.reg.GetHighReg() : INVALID_REG, size, INVALID_SREG);
}
if (card_mark) {
// Free rl_index if it's a temp. Ensures there are 2 free regs for card mark.
if (!constant_index) {
- FreeTemp(rl_index.low_reg);
+ FreeTemp(rl_index.reg.GetReg());
}
- MarkGCCard(rl_src.low_reg, rl_array.low_reg);
+ MarkGCCard(rl_src.reg.GetReg(), rl_array.reg.GetReg());
}
}
@@ -1385,52 +1391,52 @@ RegLocation X86Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation
case Instruction::SHL_LONG_2ADDR:
DCHECK_NE(shift_amount, 1); // Prevent a double store from happening.
if (shift_amount == 32) {
- OpRegCopy(rl_result.high_reg, rl_src.low_reg);
- LoadConstant(rl_result.low_reg, 0);
+ OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetReg());
+ LoadConstant(rl_result.reg.GetReg(), 0);
} else if (shift_amount > 31) {
- OpRegCopy(rl_result.high_reg, rl_src.low_reg);
- FreeTemp(rl_src.high_reg);
- NewLIR2(kX86Sal32RI, rl_result.high_reg, shift_amount - 32);
- LoadConstant(rl_result.low_reg, 0);
+ OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetReg());
+ FreeTemp(rl_src.reg.GetHighReg());
+ NewLIR2(kX86Sal32RI, rl_result.reg.GetHighReg(), shift_amount - 32);
+ LoadConstant(rl_result.reg.GetReg(), 0);
} else {
- OpRegCopy(rl_result.low_reg, rl_src.low_reg);
- OpRegCopy(rl_result.high_reg, rl_src.high_reg);
- NewLIR3(kX86Shld32RRI, rl_result.high_reg, rl_result.low_reg, shift_amount);
- NewLIR2(kX86Sal32RI, rl_result.low_reg, shift_amount);
+ OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg());
+ NewLIR3(kX86Shld32RRI, rl_result.reg.GetHighReg(), rl_result.reg.GetReg(), shift_amount);
+ NewLIR2(kX86Sal32RI, rl_result.reg.GetReg(), shift_amount);
}
break;
case Instruction::SHR_LONG:
case Instruction::SHR_LONG_2ADDR:
if (shift_amount == 32) {
- OpRegCopy(rl_result.low_reg, rl_src.high_reg);
- OpRegCopy(rl_result.high_reg, rl_src.high_reg);
- NewLIR2(kX86Sar32RI, rl_result.high_reg, 31);
+ OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetHighReg());
+ OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg());
+ NewLIR2(kX86Sar32RI, rl_result.reg.GetHighReg(), 31);
} else if (shift_amount > 31) {
- OpRegCopy(rl_result.low_reg, rl_src.high_reg);
- OpRegCopy(rl_result.high_reg, rl_src.high_reg);
- NewLIR2(kX86Sar32RI, rl_result.low_reg, shift_amount - 32);
- NewLIR2(kX86Sar32RI, rl_result.high_reg, 31);
+ OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetHighReg());
+ OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg());
+ NewLIR2(kX86Sar32RI, rl_result.reg.GetReg(), shift_amount - 32);
+ NewLIR2(kX86Sar32RI, rl_result.reg.GetHighReg(), 31);
} else {
- OpRegCopy(rl_result.low_reg, rl_src.low_reg);
- OpRegCopy(rl_result.high_reg, rl_src.high_reg);
- NewLIR3(kX86Shrd32RRI, rl_result.low_reg, rl_result.high_reg, shift_amount);
- NewLIR2(kX86Sar32RI, rl_result.high_reg, shift_amount);
+ OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg());
+ NewLIR3(kX86Shrd32RRI, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), shift_amount);
+ NewLIR2(kX86Sar32RI, rl_result.reg.GetHighReg(), shift_amount);
}
break;
case Instruction::USHR_LONG:
case Instruction::USHR_LONG_2ADDR:
if (shift_amount == 32) {
- OpRegCopy(rl_result.low_reg, rl_src.high_reg);
- LoadConstant(rl_result.high_reg, 0);
+ OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetHighReg());
+ LoadConstant(rl_result.reg.GetHighReg(), 0);
} else if (shift_amount > 31) {
- OpRegCopy(rl_result.low_reg, rl_src.high_reg);
- NewLIR2(kX86Shr32RI, rl_result.low_reg, shift_amount - 32);
- LoadConstant(rl_result.high_reg, 0);
+ OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetHighReg());
+ NewLIR2(kX86Shr32RI, rl_result.reg.GetReg(), shift_amount - 32);
+ LoadConstant(rl_result.reg.GetHighReg(), 0);
} else {
- OpRegCopy(rl_result.low_reg, rl_src.low_reg);
- OpRegCopy(rl_result.high_reg, rl_src.high_reg);
- NewLIR3(kX86Shrd32RRI, rl_result.low_reg, rl_result.high_reg, shift_amount);
- NewLIR2(kX86Shr32RI, rl_result.high_reg, shift_amount);
+ OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg());
+ NewLIR3(kX86Shrd32RRI, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), shift_amount);
+ NewLIR2(kX86Shr32RI, rl_result.reg.GetHighReg(), shift_amount);
}
break;
default:
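All three shift kinds follow the same split: for amounts below 32, SHLD/SHRD funnel bits between the halves; for 32 and above, one half is copied across and shifted by (amount - 32), with the vacated half zeroed or sign-filled. The SHL case, as arithmetic (valid for 1 <= shift <= 63, matching the DCHECK above):

#include <cstdint>

uint64_t shl64(uint32_t lo, uint32_t hi, int shift) {
  if (shift >= 32) {
    uint32_t new_hi = lo << (shift - 32);        // OpRegCopy + kX86Sal32RI
    return static_cast<uint64_t>(new_hi) << 32;  // low word becomes 0
  }
  uint32_t new_hi = (hi << shift) | (lo >> (32 - shift));  // kX86Shld32RRI
  uint32_t new_lo = lo << shift;                           // kX86Sal32RI
  return (static_cast<uint64_t>(new_hi) << 32) | new_lo;
}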
@@ -1567,7 +1573,7 @@ X86OpCode X86Mir2Lir::GetOpcode(Instruction::Code op, RegLocation loc, bool is_h
int32_t value) {
bool in_mem = loc.location != kLocPhysReg;
bool byte_imm = IS_SIMM8(value);
- DCHECK(in_mem || !IsFpReg(loc.low_reg));
+ DCHECK(in_mem || !IsFpReg(loc.reg.GetReg()));
switch (op) {
case Instruction::ADD_LONG:
case Instruction::ADD_LONG_2ADDR:
@@ -1647,15 +1653,15 @@ void X86Mir2Lir::GenLongImm(RegLocation rl_dest, RegLocation rl_src, Instruction
RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
DCHECK_EQ(rl_result.location, kLocPhysReg);
- DCHECK(!IsFpReg(rl_result.low_reg));
+ DCHECK(!IsFpReg(rl_result.reg.GetReg()));
if (!IsNoOp(op, val_lo)) {
X86OpCode x86op = GetOpcode(op, rl_result, false, val_lo);
- NewLIR2(x86op, rl_result.low_reg, val_lo);
+ NewLIR2(x86op, rl_result.reg.GetReg(), val_lo);
}
if (!IsNoOp(op, val_hi)) {
X86OpCode x86op = GetOpcode(op, rl_result, true, val_hi);
- NewLIR2(x86op, rl_result.high_reg, val_hi);
+ NewLIR2(x86op, rl_result.reg.GetHighReg(), val_hi);
}
StoreValueWide(rl_dest, rl_result);
}
@@ -1671,15 +1677,15 @@ void X86Mir2Lir::GenLongLongImm(RegLocation rl_dest, RegLocation rl_src1,
// Can we do this directly into the destination registers?
if (rl_dest.location == kLocPhysReg && rl_src1.location == kLocPhysReg &&
- rl_dest.low_reg == rl_src1.low_reg && rl_dest.high_reg == rl_src1.high_reg &&
- !IsFpReg(rl_dest.low_reg)) {
+ rl_dest.reg.GetReg() == rl_src1.reg.GetReg() && rl_dest.reg.GetHighReg() == rl_src1.reg.GetHighReg() &&
+ !IsFpReg(rl_dest.reg.GetReg())) {
if (!IsNoOp(op, val_lo)) {
X86OpCode x86op = GetOpcode(op, rl_dest, false, val_lo);
- NewLIR2(x86op, rl_dest.low_reg, val_lo);
+ NewLIR2(x86op, rl_dest.reg.GetReg(), val_lo);
}
if (!IsNoOp(op, val_hi)) {
X86OpCode x86op = GetOpcode(op, rl_dest, true, val_hi);
- NewLIR2(x86op, rl_dest.high_reg, val_hi);
+ NewLIR2(x86op, rl_dest.reg.GetHighReg(), val_hi);
}
StoreFinalValueWide(rl_dest, rl_dest);
@@ -1693,11 +1699,11 @@ void X86Mir2Lir::GenLongLongImm(RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_result = ForceTempWide(rl_src1);
if (!IsNoOp(op, val_lo)) {
X86OpCode x86op = GetOpcode(op, rl_result, false, val_lo);
- NewLIR2(x86op, rl_result.low_reg, val_lo);
+ NewLIR2(x86op, rl_result.reg.GetReg(), val_lo);
}
if (!IsNoOp(op, val_hi)) {
X86OpCode x86op = GetOpcode(op, rl_result, true, val_hi);
- NewLIR2(x86op, rl_result.high_reg, val_hi);
+ NewLIR2(x86op, rl_result.reg.GetHighReg(), val_hi);
}
StoreFinalValueWide(rl_dest, rl_result);
@@ -1709,17 +1715,17 @@ void X86Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx,
RegLocation rl_dest, RegLocation rl_src) {
RegLocation object = LoadValue(rl_src, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- int result_reg = rl_result.low_reg;
+ int result_reg = rl_result.reg.GetReg();
// SETcc only works with EAX..EDX.
- if (result_reg == object.low_reg || result_reg >= 4) {
+ if (result_reg == object.reg.GetReg() || result_reg >= 4) {
result_reg = AllocTypedTemp(false, kCoreReg);
DCHECK_LT(result_reg, 4);
}
// Assume that there is no match.
LoadConstant(result_reg, 0);
- LIR* null_branchover = OpCmpImmBranch(kCondEq, object.low_reg, 0, NULL);
+ LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg.GetReg(), 0, NULL);
int check_class = AllocTypedTemp(false, kCoreReg);
@@ -1730,11 +1736,11 @@ void X86Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx,
if (rl_method.location == kLocPhysReg) {
if (use_declaring_class) {
- LoadWordDisp(rl_method.low_reg,
+ LoadWordDisp(rl_method.reg.GetReg(),
mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
check_class);
} else {
- LoadWordDisp(rl_method.low_reg,
+ LoadWordDisp(rl_method.reg.GetReg(),
mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
check_class);
LoadWordDisp(check_class, offset_of_type, check_class);
@@ -1755,7 +1761,7 @@ void X86Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx,
// Compare the computed class to the class in the object.
DCHECK_EQ(object.location, kLocPhysReg);
- OpRegMem(kOpCmp, check_class, object.low_reg,
+ OpRegMem(kOpCmp, check_class, object.reg.GetReg(),
mirror::Object::ClassOffset().Int32Value());
// Set the low byte of the result to 0 or 1 from the compare condition code.
@@ -1765,7 +1771,7 @@ void X86Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx,
null_branchover->target = target;
FreeTemp(check_class);
if (IsTemp(result_reg)) {
- OpRegCopy(rl_result.low_reg, result_reg);
+ OpRegCopy(rl_result.reg.GetReg(), result_reg);
FreeTemp(result_reg);
}
StoreValue(rl_dest, rl_result);
@@ -1818,7 +1824,7 @@ void X86Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_k
RegLocation rl_result = GetReturn(false);
// SETcc only works with EAX..EDX.
- DCHECK_LT(rl_result.low_reg, 4);
+ DCHECK_LT(rl_result.reg.GetReg(), 4);
// Is the class NULL?
LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
@@ -1830,13 +1836,13 @@ void X86Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_k
LIR* branchover = nullptr;
if (type_known_final) {
// Ensure top 3 bytes of result are 0.
- LoadConstant(rl_result.low_reg, 0);
+ LoadConstant(rl_result.reg.GetReg(), 0);
OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));
// Set the low byte of the result to 0 or 1 from the compare condition code.
- NewLIR2(kX86Set8R, rl_result.low_reg, kX86CondEq);
+ NewLIR2(kX86Set8R, rl_result.reg.GetReg(), kX86CondEq);
} else {
if (!type_known_abstract) {
- LoadConstant(rl_result.low_reg, 1); // Assume result succeeds.
+ LoadConstant(rl_result.reg.GetReg(), 1); // Assume result succeeds.
branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
}
OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));
@@ -1964,7 +1970,7 @@ void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
rl_lhs = LoadValue(rl_lhs, kCoreReg);
rl_result = UpdateLoc(rl_dest);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegReg(op, rl_result.low_reg, rl_lhs.low_reg);
+ OpRegReg(op, rl_result.reg.GetReg(), rl_lhs.reg.GetReg());
} else {
if (shift_op) {
// X86 doesn't require masking and must use ECX.
@@ -1979,9 +1985,9 @@ void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
OpMemReg(op, rl_result, t_reg);
FreeTemp(t_reg);
return;
- } else if (!IsFpReg(rl_result.low_reg)) {
+ } else if (!IsFpReg(rl_result.reg.GetReg())) {
// Can do this directly into the result register
- OpRegReg(op, rl_result.low_reg, t_reg);
+ OpRegReg(op, rl_result.reg.GetReg(), t_reg);
FreeTemp(t_reg);
StoreFinalValue(rl_dest, rl_result);
return;
@@ -1990,7 +1996,7 @@ void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
// Three address form, or we can't do directly.
rl_lhs = LoadValue(rl_lhs, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegReg(op, rl_result.low_reg, rl_lhs.low_reg, t_reg);
+ OpRegRegReg(op, rl_result.reg.GetReg(), rl_lhs.reg.GetReg(), t_reg);
FreeTemp(t_reg);
} else {
// Multiply is 3 operand only (sort of).
@@ -2001,11 +2007,11 @@ void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
// Can we do this from memory directly?
rl_rhs = UpdateLoc(rl_rhs);
if (rl_rhs.location != kLocPhysReg) {
- OpRegMem(op, rl_result.low_reg, rl_rhs);
+ OpRegMem(op, rl_result.reg.GetReg(), rl_rhs);
StoreFinalValue(rl_dest, rl_result);
return;
- } else if (!IsFpReg(rl_rhs.low_reg)) {
- OpRegReg(op, rl_result.low_reg, rl_rhs.low_reg);
+ } else if (!IsFpReg(rl_rhs.reg.GetReg())) {
+ OpRegReg(op, rl_result.reg.GetReg(), rl_rhs.reg.GetReg());
StoreFinalValue(rl_dest, rl_result);
return;
}
@@ -2013,17 +2019,17 @@ void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
rl_rhs = LoadValue(rl_rhs, kCoreReg);
if (rl_result.location != kLocPhysReg) {
// Okay, we can do this into memory.
- OpMemReg(op, rl_result, rl_rhs.low_reg);
+ OpMemReg(op, rl_result, rl_rhs.reg.GetReg());
return;
- } else if (!IsFpReg(rl_result.low_reg)) {
+ } else if (!IsFpReg(rl_result.reg.GetReg())) {
// Can do this directly into the result register.
- OpRegReg(op, rl_result.low_reg, rl_rhs.low_reg);
+ OpRegReg(op, rl_result.reg.GetReg(), rl_rhs.reg.GetReg());
StoreFinalValue(rl_dest, rl_result);
return;
} else {
rl_lhs = LoadValue(rl_lhs, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegReg(op, rl_result.low_reg, rl_lhs.low_reg, rl_rhs.low_reg);
+ OpRegRegReg(op, rl_result.reg.GetReg(), rl_lhs.reg.GetReg(), rl_rhs.reg.GetReg());
}
} else {
// Try to use reg/memory instructions.
@@ -2035,34 +2041,34 @@ void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
rl_lhs = LoadValue(rl_lhs, kCoreReg);
rl_rhs = LoadValue(rl_rhs, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegReg(op, rl_result.low_reg, rl_lhs.low_reg, rl_rhs.low_reg);
+ OpRegRegReg(op, rl_result.reg.GetReg(), rl_lhs.reg.GetReg(), rl_rhs.reg.GetReg());
} else {
// We can optimize by moving to result and using memory operands.
if (rl_rhs.location != kLocPhysReg) {
// Force LHS into result.
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- LoadValueDirect(rl_lhs, rl_result.low_reg);
- OpRegMem(op, rl_result.low_reg, rl_rhs);
+ LoadValueDirect(rl_lhs, rl_result.reg.GetReg());
+ OpRegMem(op, rl_result.reg.GetReg(), rl_rhs);
} else if (rl_lhs.location != kLocPhysReg) {
// RHS is in a register; LHS is in memory.
if (op != kOpSub) {
// Force RHS into result and operate on memory.
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegCopy(rl_result.low_reg, rl_rhs.low_reg);
- OpRegMem(op, rl_result.low_reg, rl_lhs);
+ OpRegCopy(rl_result.reg.GetReg(), rl_rhs.reg.GetReg());
+ OpRegMem(op, rl_result.reg.GetReg(), rl_lhs);
} else {
// Subtraction isn't commutative.
rl_lhs = LoadValue(rl_lhs, kCoreReg);
rl_rhs = LoadValue(rl_rhs, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegReg(op, rl_result.low_reg, rl_lhs.low_reg, rl_rhs.low_reg);
+ OpRegRegReg(op, rl_result.reg.GetReg(), rl_lhs.reg.GetReg(), rl_rhs.reg.GetReg());
}
} else {
// Both are in registers.
rl_lhs = LoadValue(rl_lhs, kCoreReg);
rl_rhs = LoadValue(rl_rhs, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegReg(op, rl_result.low_reg, rl_lhs.low_reg, rl_rhs.low_reg);
+ OpRegRegReg(op, rl_result.reg.GetReg(), rl_lhs.reg.GetReg(), rl_rhs.reg.GetReg());
}
}
}
@@ -2073,10 +2079,10 @@ void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
bool X86Mir2Lir::IsOperationSafeWithoutTemps(RegLocation rl_lhs, RegLocation rl_rhs) {
// If either operand is in a non-core (FP) register, this can't be done safely without temps.
- if (rl_lhs.location == kLocPhysReg && IsFpReg(rl_lhs.low_reg)) {
+ if (rl_lhs.location == kLocPhysReg && IsFpReg(rl_lhs.reg.GetReg())) {
return false;
}
- if (rl_rhs.location == kLocPhysReg && IsFpReg(rl_rhs.low_reg)) {
+ if (rl_rhs.location == kLocPhysReg && IsFpReg(rl_rhs.reg.GetReg())) {
return false;
}
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index eea7191c3b..ef8be3cc61 100644
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -49,23 +49,19 @@ namespace art {
};
RegLocation X86Mir2Lir::LocCReturn() {
- RegLocation res = X86_LOC_C_RETURN;
- return res;
+ return x86_loc_c_return;
}
RegLocation X86Mir2Lir::LocCReturnWide() {
- RegLocation res = X86_LOC_C_RETURN_WIDE;
- return res;
+ return x86_loc_c_return_wide;
}
RegLocation X86Mir2Lir::LocCReturnFloat() {
- RegLocation res = X86_LOC_C_RETURN_FLOAT;
- return res;
+ return x86_loc_c_return_float;
}
RegLocation X86Mir2Lir::LocCReturnDouble() {
- RegLocation res = X86_LOC_C_RETURN_DOUBLE;
- return res;
+ return x86_loc_c_return_double;
}
// Return a target-dependent special register.
@@ -390,19 +386,19 @@ void X86Mir2Lir::ClobberCallerSave() {
RegLocation X86Mir2Lir::GetReturnWideAlt() {
RegLocation res = LocCReturnWide();
- CHECK(res.low_reg == rAX);
- CHECK(res.high_reg == rDX);
+ CHECK(res.reg.GetReg() == rAX);
+ CHECK(res.reg.GetHighReg() == rDX);
Clobber(rAX);
Clobber(rDX);
MarkInUse(rAX);
MarkInUse(rDX);
- MarkPair(res.low_reg, res.high_reg);
+ MarkPair(res.reg.GetReg(), res.reg.GetHighReg());
return res;
}
RegLocation X86Mir2Lir::GetReturnAlt() {
RegLocation res = LocCReturn();
- res.low_reg = rDX;
+ res.reg.SetReg(rDX);
Clobber(rDX);
MarkInUse(rDX);
return res;
@@ -430,27 +426,21 @@ void X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
NewLIR0(kX86Mfence);
#endif
}
-/*
- * Alloc a pair of core registers, or a double. Low reg in low byte,
- * high reg in next byte.
- */
-int X86Mir2Lir::AllocTypedTempPair(bool fp_hint,
- int reg_class) {
+
+// Alloc a pair of core registers, or a double.
+RegStorage X86Mir2Lir::AllocTypedTempWide(bool fp_hint, int reg_class) {
int high_reg;
int low_reg;
- int res = 0;
if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
low_reg = AllocTempDouble();
high_reg = low_reg; // only one allocated!
- res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
- return res;
+ // TODO: take advantage of 64-bit notation.
+ return RegStorage(RegStorage::k64BitPair, low_reg, high_reg);
}
-
low_reg = AllocTemp();
high_reg = AllocTemp();
- res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
- return res;
+ return RegStorage(RegStorage::k64BitPair, low_reg, high_reg);
}
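For reference, the retired AllocTypedTempPair encoding packed both register ids into one int; RegStorage replaces that with an explicit pair (or a single double-precision reg when low == high) behind GetReg()/GetHighReg(). The old scheme, spelled out:

int legacy_pack(int low_reg, int high_reg) {
  return (low_reg & 0xff) | ((high_reg & 0xff) << 8);  // old return value
}
int legacy_low(int packed)  { return packed & 0xff; }         // was: new_regs & 0xff
int legacy_high(int packed) { return (packed >> 8) & 0xff; }  // was: (new_regs >> 8) & 0xff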
int X86Mir2Lir::AllocTypedTemp(bool fp_hint, int reg_class) {
@@ -493,11 +483,11 @@ void X86Mir2Lir::CompilerInitializeRegAlloc() {
void X86Mir2Lir::FreeRegLocTemps(RegLocation rl_keep,
RegLocation rl_free) {
- if ((rl_free.low_reg != rl_keep.low_reg) && (rl_free.low_reg != rl_keep.high_reg) &&
- (rl_free.high_reg != rl_keep.low_reg) && (rl_free.high_reg != rl_keep.high_reg)) {
+ if ((rl_free.reg.GetReg() != rl_keep.reg.GetReg()) && (rl_free.reg.GetReg() != rl_keep.reg.GetHighReg()) &&
+ (rl_free.reg.GetHighReg() != rl_keep.reg.GetReg()) && (rl_free.reg.GetHighReg() != rl_keep.reg.GetHighReg())) {
// No overlap, free both
- FreeTemp(rl_free.low_reg);
- FreeTemp(rl_free.high_reg);
+ FreeTemp(rl_free.reg.GetReg());
+ FreeTemp(rl_free.reg.GetHighReg());
}
}
@@ -539,7 +529,8 @@ X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator*
: Mir2Lir(cu, mir_graph, arena),
method_address_insns_(arena, 100, kGrowableArrayMisc),
class_type_address_insns_(arena, 100, kGrowableArrayMisc),
- call_method_insns_(arena, 100, kGrowableArrayMisc) {
+ call_method_insns_(arena, 100, kGrowableArrayMisc),
+ stack_decrement_(nullptr), stack_increment_(nullptr) {
store_method_addr_used_ = false;
for (int i = 0; i < kX86Last; i++) {
if (X86Mir2Lir::EncodingMap[i].opcode != i) {
@@ -601,11 +592,11 @@ RegLocation X86Mir2Lir::UpdateLocWide(RegLocation loc) {
if (match) {
// We can reuse; update the register usage info.
- loc.low_reg = info_lo->reg;
- loc.high_reg = info_lo->reg; // Play nice with existing code.
loc.location = kLocPhysReg;
loc.vec_len = kVectorLength8;
- DCHECK(IsFpReg(loc.low_reg));
+ // TODO: use k64BitVector
+ loc.reg = RegStorage(RegStorage::k64BitPair, info_lo->reg, info_lo->reg);
+ DCHECK(IsFpReg(loc.reg.GetReg()));
return loc;
}
// We can't easily reuse; clobber and free any overlaps.
@@ -635,11 +626,10 @@ RegLocation X86Mir2Lir::UpdateLocWide(RegLocation loc) {
}
if (match) {
// Can reuse - update the register usage info
- loc.low_reg = info_lo->reg;
- loc.high_reg = info_hi->reg;
+ loc.reg = RegStorage(RegStorage::k64BitPair, info_lo->reg, info_hi->reg);
loc.location = kLocPhysReg;
- MarkPair(loc.low_reg, loc.high_reg);
- DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
+ MarkPair(loc.reg.GetReg(), loc.reg.GetHighReg());
+ DCHECK(!IsFpReg(loc.reg.GetReg()) || ((loc.reg.GetReg() & 0x1) == 0));
return loc;
}
// Can't easily reuse - clobber and free any overlaps
@@ -663,7 +653,6 @@ RegLocation X86Mir2Lir::UpdateLocWide(RegLocation loc) {
// TODO: Reunify with common code after 'pair mess' has been fixed
RegLocation X86Mir2Lir::EvalLocWide(RegLocation loc, int reg_class, bool update) {
DCHECK(loc.wide);
- int32_t new_regs;
int32_t low_reg;
int32_t high_reg;
@@ -671,38 +660,37 @@ RegLocation X86Mir2Lir::EvalLocWide(RegLocation loc, int reg_class, bool update)
/* If it is already in a register, we can assume proper form. Is it the right reg class? */
if (loc.location == kLocPhysReg) {
- DCHECK_EQ(IsFpReg(loc.low_reg), loc.IsVectorScalar());
- if (!RegClassMatches(reg_class, loc.low_reg)) {
+ DCHECK_EQ(IsFpReg(loc.reg.GetReg()), loc.IsVectorScalar());
+ if (!RegClassMatches(reg_class, loc.reg.GetReg())) {
/* It is the wrong register class. Reallocate and copy. */
- if (!IsFpReg(loc.low_reg)) {
+ if (!IsFpReg(loc.reg.GetReg())) {
// We want this in a FP reg, and it is in core registers.
DCHECK(reg_class != kCoreReg);
// Allocate this into any FP reg, and mark it with the right size.
low_reg = AllocTypedTemp(true, reg_class);
- OpVectorRegCopyWide(low_reg, loc.low_reg, loc.high_reg);
- CopyRegInfo(low_reg, loc.low_reg);
- Clobber(loc.low_reg);
- Clobber(loc.high_reg);
- loc.low_reg = low_reg;
- loc.high_reg = low_reg; // Play nice with existing code.
+ OpVectorRegCopyWide(low_reg, loc.reg.GetReg(), loc.reg.GetHighReg());
+ CopyRegInfo(low_reg, loc.reg.GetReg());
+ Clobber(loc.reg.GetReg());
+ Clobber(loc.reg.GetHighReg());
+ loc.reg.SetReg(low_reg);
+ loc.reg.SetHighReg(low_reg); // Play nice with existing code.
loc.vec_len = kVectorLength8;
} else {
// The value is in a FP register, and we want it in a pair of core registers.
DCHECK_EQ(reg_class, kCoreReg);
- DCHECK_EQ(loc.low_reg, loc.high_reg);
- new_regs = AllocTypedTempPair(false, kCoreReg); // Force to core registers.
- low_reg = new_regs & 0xff;
- high_reg = (new_regs >> 8) & 0xff;
+ DCHECK_EQ(loc.reg.GetReg(), loc.reg.GetHighReg());
+ RegStorage new_regs = AllocTypedTempWide(false, kCoreReg); // Force to core registers.
+ low_reg = new_regs.GetReg();
+ high_reg = new_regs.GetHighReg();
DCHECK_NE(low_reg, high_reg);
- OpRegCopyWide(low_reg, high_reg, loc.low_reg, loc.high_reg);
- CopyRegInfo(low_reg, loc.low_reg);
- CopyRegInfo(high_reg, loc.high_reg);
- Clobber(loc.low_reg);
- Clobber(loc.high_reg);
- loc.low_reg = low_reg;
- loc.high_reg = high_reg;
- MarkPair(loc.low_reg, loc.high_reg);
- DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
+ OpRegCopyWide(low_reg, high_reg, loc.reg.GetReg(), loc.reg.GetHighReg());
+ CopyRegInfo(low_reg, loc.reg.GetReg());
+ CopyRegInfo(high_reg, loc.reg.GetHighReg());
+ Clobber(loc.reg.GetReg());
+ Clobber(loc.reg.GetHighReg());
+ loc.reg = new_regs;
+ MarkPair(loc.reg.GetReg(), loc.reg.GetHighReg());
+ DCHECK(!IsFpReg(loc.reg.GetReg()) || ((loc.reg.GetReg() & 0x1) == 0));
}
}
return loc;
@@ -711,21 +699,20 @@ RegLocation X86Mir2Lir::EvalLocWide(RegLocation loc, int reg_class, bool update)
DCHECK_NE(loc.s_reg_low, INVALID_SREG);
DCHECK_NE(GetSRegHi(loc.s_reg_low), INVALID_SREG);
- new_regs = AllocTypedTempPair(loc.fp, reg_class);
- loc.low_reg = new_regs & 0xff;
- loc.high_reg = (new_regs >> 8) & 0xff;
+ loc.reg = AllocTypedTempWide(loc.fp, reg_class);
- if (loc.low_reg == loc.high_reg) {
- DCHECK(IsFpReg(loc.low_reg));
+ // FIXME: take advantage of RegStorage notation.
+ if (loc.reg.GetReg() == loc.reg.GetHighReg()) {
+ DCHECK(IsFpReg(loc.reg.GetReg()));
loc.vec_len = kVectorLength8;
} else {
- MarkPair(loc.low_reg, loc.high_reg);
+ MarkPair(loc.reg.GetReg(), loc.reg.GetHighReg());
}
if (update) {
loc.location = kLocPhysReg;
- MarkLive(loc.low_reg, loc.s_reg_low);
- if (loc.low_reg != loc.high_reg) {
- MarkLive(loc.high_reg, GetSRegHi(loc.s_reg_low));
+ MarkLive(loc.reg.GetReg(), loc.s_reg_low);
+ if (loc.reg.GetReg() != loc.reg.GetHighReg()) {
+ MarkLive(loc.reg.GetHighReg(), GetSRegHi(loc.s_reg_low));
}
}
return loc;
@@ -741,14 +728,14 @@ RegLocation X86Mir2Lir::EvalLoc(RegLocation loc, int reg_class, bool update) {
loc = UpdateLoc(loc);
if (loc.location == kLocPhysReg) {
- if (!RegClassMatches(reg_class, loc.low_reg)) {
+ if (!RegClassMatches(reg_class, loc.reg.GetReg())) {
/* Wrong register class. Realloc, copy and transfer ownership. */
new_reg = AllocTypedTemp(loc.fp, reg_class);
- OpRegCopy(new_reg, loc.low_reg);
- CopyRegInfo(new_reg, loc.low_reg);
- Clobber(loc.low_reg);
- loc.low_reg = new_reg;
- if (IsFpReg(loc.low_reg) && reg_class != kCoreReg)
+ OpRegCopy(new_reg, loc.reg.GetReg());
+ CopyRegInfo(new_reg, loc.reg.GetReg());
+ Clobber(loc.reg.GetReg());
+ loc.reg.SetReg(new_reg);
+ if (IsFpReg(loc.reg.GetReg()) && reg_class != kCoreReg)
loc.vec_len = kVectorLength4;
}
return loc;
@@ -756,14 +743,13 @@ RegLocation X86Mir2Lir::EvalLoc(RegLocation loc, int reg_class, bool update) {
DCHECK_NE(loc.s_reg_low, INVALID_SREG);
- new_reg = AllocTypedTemp(loc.fp, reg_class);
- loc.low_reg = new_reg;
- if (IsFpReg(loc.low_reg) && reg_class != kCoreReg)
+ loc.reg = RegStorage(RegStorage::k32BitSolo, AllocTypedTemp(loc.fp, reg_class));
+ if (IsFpReg(loc.reg.GetReg()) && reg_class != kCoreReg)
loc.vec_len = kVectorLength4;
if (update) {
loc.location = kLocPhysReg;
- MarkLive(loc.low_reg, loc.s_reg_low);
+ MarkLive(loc.reg.GetReg(), loc.s_reg_low);
}
return loc;
}
@@ -776,15 +762,15 @@ int X86Mir2Lir::AllocTempDouble() {
// TODO: Reunify with common code after 'pair mess' has been fixed
void X86Mir2Lir::ResetDefLocWide(RegLocation rl) {
DCHECK(rl.wide);
- RegisterInfo* p_low = IsTemp(rl.low_reg);
- if (IsFpReg(rl.low_reg)) {
+ RegisterInfo* p_low = IsTemp(rl.reg.GetReg());
+ if (IsFpReg(rl.reg.GetReg())) {
// We are using only the low register.
if (p_low && !(cu_->disable_opt & (1 << kSuppressLoads))) {
NullifyRange(p_low->def_start, p_low->def_end, p_low->s_reg, rl.s_reg_low);
}
- ResetDef(rl.low_reg);
+ ResetDef(rl.reg.GetReg());
} else {
- RegisterInfo* p_high = IsTemp(rl.high_reg);
+ RegisterInfo* p_high = IsTemp(rl.reg.GetHighReg());
if (p_low && !(cu_->disable_opt & (1 << kSuppressLoads))) {
DCHECK(p_low->pair);
NullifyRange(p_low->def_start, p_low->def_end, p_low->s_reg, rl.s_reg_low);
@@ -792,8 +778,8 @@ void X86Mir2Lir::ResetDefLocWide(RegLocation rl) {
if (p_high && !(cu_->disable_opt & (1 << kSuppressLoads))) {
DCHECK(p_high->pair);
}
- ResetDef(rl.low_reg);
- ResetDef(rl.high_reg);
+ ResetDef(rl.reg.GetReg());
+ ResetDef(rl.reg.GetHighReg());
}
}
@@ -832,8 +818,8 @@ void X86Mir2Lir::DumpRegLocation(RegLocation loc) {
<< (loc.high_word ? " h" : " ")
<< (loc.home ? " H" : " ")
<< " vec_len: " << loc.vec_len
- << ", low: " << static_cast<int>(loc.low_reg)
- << ", high: " << static_cast<int>(loc.high_reg)
+ << ", low: " << static_cast<int>(loc.reg.GetReg())
+ << ", high: " << static_cast<int>(loc.reg.GetHighReg())
<< ", s_reg: " << loc.s_reg_low
<< ", orig: " << loc.orig_sreg;
}
@@ -1036,8 +1022,8 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
// Runtime start index.
rl_start = UpdateLoc(rl_start);
if (rl_start.location == kLocPhysReg) {
- length_compare = OpCmpBranch(kCondLe, rCX, rl_start.low_reg, nullptr);
- OpRegReg(kOpSub, rCX, rl_start.low_reg);
+ length_compare = OpCmpBranch(kCondLe, rCX, rl_start.reg.GetReg(), nullptr);
+ OpRegReg(kOpSub, rCX, rl_start.reg.GetReg());
} else {
// Compare to memory to avoid a register load. Handle pushed EDI.
int displacement = SRegOffset(rl_start.s_reg_low) + sizeof(uint32_t);
@@ -1066,13 +1052,13 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
}
} else {
if (rl_start.location == kLocPhysReg) {
- if (rl_start.low_reg == rDI) {
+ if (rl_start.reg.GetReg() == rDI) {
// We have a slight problem here. We are already using RDI!
// Grab the value from the stack.
LoadWordDisp(rX86_SP, 0, rDX);
OpLea(rDI, rBX, rDX, 1, 0);
} else {
- OpLea(rDI, rBX, rl_start.low_reg, 1, 0);
+ OpLea(rDI, rBX, rl_start.reg.GetReg(), 1, 0);
}
} else {
OpRegCopy(rDI, rBX);
@@ -1094,14 +1080,14 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
// index = ((curr_ptr - orig_ptr) / 2) - 1.
OpRegReg(kOpSub, rDI, rBX);
OpRegImm(kOpAsr, rDI, 1);
- NewLIR3(kX86Lea32RM, rl_return.low_reg, rDI, -1);
+ NewLIR3(kX86Lea32RM, rl_return.reg.GetReg(), rDI, -1);
LIR *all_done = NewLIR1(kX86Jmp8, 0);
// Failed to match; return -1.
LIR *not_found = NewLIR0(kPseudoTargetLabel);
length_compare->target = not_found;
failed_branch->target = not_found;
- LoadConstantNoClobber(rl_return.low_reg, -1);
+ LoadConstantNoClobber(rl_return.reg.GetReg(), -1);
// And join up at the end.
all_done->target = NewLIR0(kPseudoTargetLabel);
@@ -1118,4 +1104,166 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
return true;
}
+/*
+ * @brief Enter a 32-bit quantity into the FDE buffer
+ * @param buf FDE buffer.
+ * @param data Data value.
+ */
+static void PushWord(std::vector<uint8_t>& buf, int data) {
+ buf.push_back(data & 0xff);
+ buf.push_back((data >> 8) & 0xff);
+ buf.push_back((data >> 16) & 0xff);
+ buf.push_back((data >> 24) & 0xff);
+}
+
+/*
+ * @brief Enter an 'advance LOC' into the FDE buffer
+ * @param buf FDE buffer.
+ * @param increment Amount by which to increase the current location.
+ */
+static void AdvanceLoc(std::vector<uint8_t>& buf, uint32_t increment) {
+ if (increment < 64) {
+ // Encoding in opcode.
+ buf.push_back(0x1 << 6 | increment);
+ } else if (increment < 256) {
+ // Single byte delta.
+ buf.push_back(0x02);
+ buf.push_back(increment);
+ } else if (increment < 256 * 256) {
+ // Two byte delta.
+ buf.push_back(0x03);
+ buf.push_back(increment & 0xff);
+ buf.push_back((increment >> 8) & 0xff);
+ } else {
+ // Four byte delta.
+ buf.push_back(0x04);
+ PushWord(buf, increment);
+ }
+}
+
+
+std::vector<uint8_t>* X86CFIInitialization() {
+ return X86Mir2Lir::ReturnCommonCallFrameInformation();
+}
+
+std::vector<uint8_t>* X86Mir2Lir::ReturnCommonCallFrameInformation() {
+ std::vector<uint8_t>* cfi_info = new std::vector<uint8_t>;
+
+ // Length of the CIE (except for this field).
+ PushWord(*cfi_info, 16);
+
+ // CIE id.
+ PushWord(*cfi_info, 0xFFFFFFFFU);
+
+ // Version: 3.
+ cfi_info->push_back(0x03);
+
+ // Augmentation: empty string.
+ cfi_info->push_back(0x0);
+
+ // Code alignment: 1.
+ cfi_info->push_back(0x01);
+
+ // Data alignment: -4.
+ cfi_info->push_back(0x7C);
+
+ // Return address register (R8).
+ cfi_info->push_back(0x08);
+
+ // Initial return PC is 4(ESP): DW_CFA_def_cfa R4 4.
+ cfi_info->push_back(0x0C);
+ cfi_info->push_back(0x04);
+ cfi_info->push_back(0x04);
+
+ // Return address location: 0(SP): DW_CFA_offset R8 1 (* -4).
+ cfi_info->push_back(0x2 << 6 | 0x08);
+ cfi_info->push_back(0x01);
+
+ // And two NOPs to align to a 4-byte boundary.
+ cfi_info->push_back(0x0);
+ cfi_info->push_back(0x0);
+
+ DCHECK_EQ(cfi_info->size() & 3, 0U);
+ return cfi_info;
+}
+
+static void EncodeUnsignedLeb128(std::vector<uint8_t>& buf, uint32_t value) {
+ uint8_t buffer[12];
+ uint8_t *ptr = EncodeUnsignedLeb128(buffer, value);
+ for (uint8_t *p = buffer; p < ptr; p++) {
+ buf.push_back(*p);
+ }
+}
+
+std::vector<uint8_t>* X86Mir2Lir::ReturnCallFrameInformation() {
+ std::vector<uint8_t>* cfi_info = new std::vector<uint8_t>;
+
+ // Generate the FDE for the method.
+ DCHECK_NE(data_offset_, 0U);
+
+ // Length (will be filled in later in this routine).
+ PushWord(*cfi_info, 0);
+
+ // CIE_pointer (can be filled in by linker); might be left at 0 if there is only
+ // one CIE for the whole debug_frame section.
+ PushWord(*cfi_info, 0);
+
+ // 'initial_location' (filled in by linker).
+ PushWord(*cfi_info, 0);
+
+ // 'address_range' (number of bytes in the method).
+ PushWord(*cfi_info, data_offset_);
+
+ // The instructions in the FDE.
+ if (stack_decrement_ != nullptr) {
+ // Advance LOC to just past the stack decrement.
+ uint32_t pc = NEXT_LIR(stack_decrement_)->offset;
+ AdvanceLoc(*cfi_info, pc);
+
+ // Now update the offset to the call frame: DW_CFA_def_cfa_offset frame_size.
+ cfi_info->push_back(0x0e);
+ EncodeUnsignedLeb128(*cfi_info, frame_size_);
+
+ // We continue with that stack until the epilogue.
+ if (stack_increment_ != nullptr) {
+ uint32_t new_pc = NEXT_LIR(stack_increment_)->offset;
+ AdvanceLoc(*cfi_info, new_pc - pc);
+
+ // We probably have code snippets after the epilogue, so save the
+ // current state: DW_CFA_remember_state.
+ cfi_info->push_back(0x0a);
+
+ // We have now popped the stack: DW_CFA_def_cfa_offset 4. There is only the return
+ // PC on the stack now.
+ cfi_info->push_back(0x0e);
+ EncodeUnsignedLeb128(*cfi_info, 4);
+
+ // Everything after that is the same as before the epilogue.
+ // Stack bump was followed by RET instruction.
+ LIR *post_ret_insn = NEXT_LIR(NEXT_LIR(stack_increment_));
+ if (post_ret_insn != nullptr) {
+ pc = new_pc;
+ new_pc = post_ret_insn->offset;
+ AdvanceLoc(*cfi_info, new_pc - pc);
+ // Restore the state: DW_CFA_restore_state.
+ cfi_info->push_back(0x0b);
+ }
+ }
+ }
+
+ // Padding to a multiple of 4.
+ while ((cfi_info->size() & 3) != 0) {
+ // DW_CFA_nop is encoded as 0.
+ cfi_info->push_back(0);
+ }
+
+ // Set the length of the FDE inside the generated bytes.
+ uint32_t length = cfi_info->size() - 4;
+ (*cfi_info)[0] = length;
+ (*cfi_info)[1] = length >> 8;
+ (*cfi_info)[2] = length >> 16;
+ (*cfi_info)[3] = length >> 24;
+ return cfi_info;
+}
+
} // namespace art
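
For reference, AdvanceLoc above picks the shortest DWARF DW_CFA_advance_loc form for a given PC delta: the delta is folded into the opcode when it fits in 6 bits, and otherwise spills into 1-, 2- or 4-byte operands. A minimal standalone sketch of that selection follows; the main() driver and hex dump are illustration only, not part of the patch.

    // Standalone sketch of the DW_CFA_advance_loc selection used by AdvanceLoc above.
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    static void PushWord(std::vector<uint8_t>& buf, int data) {
      // Little-endian 32-bit value, as in the patch.
      for (int shift = 0; shift < 32; shift += 8) {
        buf.push_back((data >> shift) & 0xff);
      }
    }

    static void AdvanceLoc(std::vector<uint8_t>& buf, uint32_t increment) {
      if (increment < 64) {
        buf.push_back((0x1 << 6) | increment);  // DW_CFA_advance_loc: delta in opcode.
      } else if (increment < 256) {
        buf.push_back(0x02);                    // DW_CFA_advance_loc1: one-byte delta.
        buf.push_back(increment);
      } else if (increment < 256 * 256) {
        buf.push_back(0x03);                    // DW_CFA_advance_loc2: two-byte delta.
        buf.push_back(increment & 0xff);
        buf.push_back((increment >> 8) & 0xff);
      } else {
        buf.push_back(0x04);                    // DW_CFA_advance_loc4: four-byte delta.
        PushWord(buf, increment);
      }
    }

    int main() {
      for (uint32_t delta : {10u, 100u, 1000u, 100000u}) {
        std::vector<uint8_t> buf;
        AdvanceLoc(buf, delta);
        printf("delta %6u ->", delta);
        for (uint8_t b : buf) printf(" %02x", static_cast<unsigned>(b));
        printf("\n");  // 10 -> 4a; 100 -> 02 64; 1000 -> 03 e8 03; 100000 -> 04 a0 86 01 00.
      }
      return 0;
    }

The single-byte form covers deltas under 64 bytes, which is the common case for the short prologue this FDE describes, so a typical method's frame description stays within a handful of bytes.
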
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index 48a39bb5b4..d5d6b0e348 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -514,7 +514,7 @@ LIR* X86Mir2Lir::LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value) {
// We don't know the proper offset for the value, so pick one that will force
// 4 byte offset. We will fix this up in the assembler later to have the right
// value.
- res = LoadBaseDisp(rl_method.low_reg, 256 /* bogus */, r_dest_lo, kDouble, INVALID_SREG);
+ res = LoadBaseDisp(rl_method.reg.GetReg(), 256 /* bogus */, r_dest_lo, kDouble, INVALID_SREG);
res->target = data_target;
res->flags.fixup = kFixupLoad;
SetMemRefType(res, true, kLiteral);
@@ -714,7 +714,7 @@ LIR* X86Mir2Lir::StoreBaseIndexedDisp(int rBase, int r_index, int scale,
opcode = is_array ? kX86Mov8AR : kX86Mov8MR;
break;
default:
- LOG(FATAL) << "Bad case in LoadBaseIndexedDispBody";
+ LOG(FATAL) << "Bad case in StoreBaseIndexedDispBody";
}
if (!is_array) {
diff --git a/compiler/dex/quick/x86/x86_lir.h b/compiler/dex/quick/x86/x86_lir.h
index 4064bd6550..09cbbeec82 100644
--- a/compiler/dex/quick/x86/x86_lir.h
+++ b/compiler/dex/quick/x86/x86_lir.h
@@ -126,13 +126,6 @@ namespace art {
/* Mask to strip off fp flags */
#define X86_FP_REG_MASK 0xF
-// RegisterLocation templates return values (rAX, rAX/rDX or XMM0).
-// location, wide, defined, const, fp, core, ref, high_word, home, vec_len, low_reg, high_reg, s_reg_low
-#define X86_LOC_C_RETURN {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, rAX, INVALID_REG, INVALID_SREG, INVALID_SREG}
-#define X86_LOC_C_RETURN_WIDE {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, rAX, rDX, INVALID_SREG, INVALID_SREG}
-#define X86_LOC_C_RETURN_FLOAT {kLocPhysReg, 0, 0, 0, 1, 0, 0, 0, 1, kVectorLength4, fr0, INVALID_REG, INVALID_SREG, INVALID_SREG}
-#define X86_LOC_C_RETURN_DOUBLE {kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1, kVectorLength8, fr0, fr0, INVALID_SREG, INVALID_SREG}
-
enum X86ResourceEncodingPos {
kX86GPReg0 = 0,
kX86RegSP = 4,
@@ -211,6 +204,22 @@ enum X86NativeRegisterPool {
#define rX86_COUNT rCX
#define rX86_PC INVALID_REG
+// RegisterLocation templates for return values (rAX, rAX/rDX or fr0).
+const RegLocation x86_loc_c_return
+ {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
+ RegStorage(RegStorage::k32BitSolo, rAX), INVALID_SREG, INVALID_SREG};
+const RegLocation x86_loc_c_return_wide
+ {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
+ RegStorage(RegStorage::k64BitPair, rAX, rDX), INVALID_SREG, INVALID_SREG};
+// TODO: update to use k32BitVector (must encode in 7 bits, including fp flag).
+const RegLocation x86_loc_c_return_float
+ {kLocPhysReg, 0, 0, 0, 1, 0, 0, 0, 1, kVectorLength4,
+ RegStorage(RegStorage::k32BitSolo, fr0), INVALID_SREG, INVALID_SREG};
+// TODO: update to use k64BitVector (must encode in 7 bits, including fp flag).
+const RegLocation x86_loc_c_return_double
+ {kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1, kVectorLength8,
+ RegStorage(RegStorage::k64BitPair, fr0, fr0), INVALID_SREG, INVALID_SREG};
+
/*
* The following enum defines the list of supported X86 instructions by the
* assembler. Their corresponding EncodingMap positions will be defined in
diff --git a/compiler/dex/reg_storage.h b/compiler/dex/reg_storage.h
new file mode 100644
index 0000000000..c59617edea
--- /dev/null
+++ b/compiler/dex/reg_storage.h
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_REG_STORAGE_H_
+#define ART_COMPILER_DEX_REG_STORAGE_H_
+
+
+namespace art {
+
+/*
+ * Representation of the physical register, register pair or vector holding a Dalvik value.
+ * The basic configuration of the storage (i.e. solo reg, pair, vector) is common across all
+ * targets, but the encoding of the actual storage element is target dependent.
+ *
+ * The two most-significant bits describe the basic shape of the storage, while meaning of the
+ * lower 14 bits depends on the shape:
+ *
+ * [PW]
+ * P: 0 -> pair, 1 -> solo (or vector)
+ * W: 1 -> 64 bits, 0 -> 32 bits
+ *
+ * [00] [xxxxxxxxxxxxxx] Invalid (typically all zeros)
+ * [01] [HHHHHHH] [LLLLLLL] 64-bit storage, composed of 2 32-bit registers
+ * [10] [0] [xxxxxx] [RRRRRRR] 32-bit solo register
+ * [11] [0] [xxxxxx] [RRRRRRR] 64-bit solo register
+ * [10] [1] [xxxxxx] [VVVVVVV] 32-bit vector storage
+ * [11] [1] [xxxxxx] [VVVVVVV] 64-bit vector storage
+ *
+ * x - don't care
+ * L - low register number of a pair
+ * H - high register number of a pair
+ * R - register number of a solo reg
+ * V - vector description
+ *
+ * Note that in all non-invalid cases, the low 7 bits must be sufficient to describe
+ * whether the storage element is floating point (see IsFloatReg()).
+ *
+ */
+
+class RegStorage {
+ public:
+ enum RegStorageKind {
+ kInvalid = 0x0000,
+ k64BitPair = 0x4000,
+ k32BitSolo = 0x8000,
+ k64BitSolo = 0xc000,
+ k32BitVector = 0xa000,
+ k64BitVector = 0xe000,
+ kPairMask = 0x8000,
+ kPair = 0x0000,
+ kSizeMask = 0x4000,
+ k64Bit = 0x4000,
+ k32Bit = 0x0000,
+ kVectorMask = 0xa000,
+ kVector = 0xa000,
+ kSolo = 0x8000,
+ kShapeMask = 0xc000,
+ kKindMask = 0xe000
+ };
+
+ static const uint16_t kRegValMask = 0x007f;
+ static const uint16_t kHighRegShift = 7;
+ static const uint16_t kHighRegMask = kRegValMask << kHighRegShift;
+
+ RegStorage(RegStorageKind rs_kind, int reg) {
+ DCHECK_NE(rs_kind & kShapeMask, kInvalid);
+ DCHECK_NE(rs_kind & kShapeMask, k64BitPair);
+ DCHECK_EQ(rs_kind & ~kKindMask, 0);
+ DCHECK_EQ(reg & ~kRegValMask, 0);
+ reg_ = rs_kind | reg;
+ }
+ RegStorage(RegStorageKind rs_kind, int low_reg, int high_reg) {
+ DCHECK_EQ(rs_kind, k64BitPair);
+ DCHECK_EQ(low_reg & ~kRegValMask, 0);
+ DCHECK_EQ(high_reg & ~kRegValMask, 0);
+ reg_ = rs_kind | (high_reg << kHighRegShift) | low_reg;
+ }
+ explicit RegStorage(uint16_t val) : reg_(val) {}
+ RegStorage() : reg_(kInvalid) {}
+ ~RegStorage() {}
+
+ bool IsInvalid() const {
+ return ((reg_ & kShapeMask) == kInvalid);
+ }
+
+ bool Is32Bit() const {
+ DCHECK(!IsInvalid());
+ return ((reg_ & kSizeMask) == k32Bit);
+ }
+
+ bool Is64Bit() const {
+ DCHECK(!IsInvalid());
+ return ((reg_ & kSizeMask) == k64Bit);
+ }
+
+ bool IsPair() const {
+ DCHECK(!IsInvalid());
+ return ((reg_ & kPairMask) == kPair);
+ }
+
+ bool IsSolo() const {
+ DCHECK(!IsInvalid());
+ return ((reg_ & kVectorMask) == kSolo);
+ }
+
+ bool IsVector() const {
+ DCHECK(!IsInvalid());
+ return ((reg_ & kVectorMask) == kVector);
+ }
+
+ // Used to retrieve either the low register of a pair, or the only register.
+ int GetReg() const {
+ DCHECK(!IsInvalid());
+ return (reg_ & kRegValMask);
+ }
+
+ void SetReg(int reg) {
+ DCHECK(!IsInvalid());
+ reg_ = (reg_ & ~kRegValMask) | reg;
+ DCHECK_EQ(GetReg(), reg);
+ }
+
+ // Retrieve the most significant register of a pair.
+ int GetHighReg() const {
+ DCHECK(IsPair());
+ return (reg_ & kHighRegMask) >> kHighRegShift;
+ }
+
+ void SetHighReg(int reg) {
+ DCHECK(IsPair());
+ reg_ = (reg_ & ~kHighRegMask) | (reg << kHighRegShift);
+ DCHECK_EQ(GetHighReg(), reg);
+ }
+
+ int GetRawBits() const {
+ return reg_;
+ }
+
+ private:
+ uint16_t reg_;
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_DEX_REG_STORAGE_H_
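
To make the bit layout above concrete, here is a small usage sketch, assuming reg_storage.h and ART's DCHECK macros are available on the include path; the main() driver is illustration only.

    // Sketch of RegStorage encodings, following the shape table in reg_storage.h.
    #include <cstdio>
    #include "reg_storage.h"

    int main() {
      // 64-bit pair, low reg 1, high reg 2: 0x4000 | (2 << 7) | 1 == 0x4101.
      art::RegStorage pair(art::RegStorage::k64BitPair, 1, 2);
      printf("pair: raw=0x%04x low=%d high=%d is_pair=%d\n",
             static_cast<unsigned>(pair.GetRawBits()), pair.GetReg(),
             pair.GetHighReg(), pair.IsPair());

      // 32-bit solo, reg 5: 0x8000 | 5 == 0x8005.
      art::RegStorage solo(art::RegStorage::k32BitSolo, 5);
      printf("solo: raw=0x%04x reg=%d is_solo=%d is_32bit=%d\n",
             static_cast<unsigned>(solo.GetRawBits()), solo.GetReg(),
             solo.IsSolo(), solo.Is32Bit());

      // Default-constructed storage has shape bits 00 and is invalid.
      art::RegStorage invalid;
      printf("invalid: %d\n", invalid.IsInvalid());
      return 0;
    }
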
diff --git a/compiler/dex/vreg_analysis.cc b/compiler/dex/vreg_analysis.cc
index f8dc223af7..4d2c05166b 100644
--- a/compiler/dex/vreg_analysis.cc
+++ b/compiler/dex/vreg_analysis.cc
@@ -379,16 +379,14 @@ void MIRGraph::DumpRegLocTable(RegLocation* table, int count) {
Mir2Lir* cg = static_cast<Mir2Lir*>(cu_->cg.get());
if (cg != NULL) {
for (int i = 0; i < count; i++) {
- LOG(INFO) << StringPrintf("Loc[%02d] : %s, %c %c %c %c %c %c %c%d %c%d S%d",
+ LOG(INFO) << StringPrintf("Loc[%02d] : %s, %c %c %c %c %c %c 0x%04x S%d",
table[i].orig_sreg, storage_name[table[i].location],
table[i].wide ? 'W' : 'N', table[i].defined ? 'D' : 'U',
table[i].fp ? 'F' : table[i].ref ? 'R' :'C',
table[i].is_const ? 'c' : 'n',
table[i].high_word ? 'H' : 'L', table[i].home ? 'h' : 't',
- cg->IsFpReg(table[i].low_reg) ? 's' : 'r',
- table[i].low_reg & cg->FpRegMask(),
- cg->IsFpReg(table[i].high_reg) ? 's' : 'r',
- table[i].high_reg & cg->FpRegMask(), table[i].s_reg_low);
+ table[i].reg.GetRawBits(),
+ table[i].s_reg_low);
}
} else {
// Either pre-regalloc or Portable.
@@ -404,9 +402,9 @@ void MIRGraph::DumpRegLocTable(RegLocation* table, int count) {
}
}
-static const RegLocation fresh_loc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0,
- kVectorNotUsed, INVALID_REG, INVALID_REG, INVALID_SREG,
- INVALID_SREG};
+// FIXME - will likely need to revisit all uses of this.
+static const RegLocation fresh_loc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0, kVectorNotUsed,
+ RegStorage(), INVALID_SREG, INVALID_SREG};
void MIRGraph::InitRegLocations() {
/* Allocate the location map */
diff --git a/compiler/driver/compiler_callbacks_impl.h b/compiler/driver/compiler_callbacks_impl.h
index fd2cd4a31d..ed6a9255b9 100644
--- a/compiler/driver/compiler_callbacks_impl.h
+++ b/compiler/driver/compiler_callbacks_impl.h
@@ -23,7 +23,7 @@
namespace art {
-class CompilerCallbacksImpl : public CompilerCallbacks {
+class CompilerCallbacksImpl FINAL : public CompilerCallbacks {
public:
CompilerCallbacksImpl(VerificationResults* verification_results,
DexFileToMethodInlinerMap* method_inliner_map)
@@ -33,10 +33,10 @@ class CompilerCallbacksImpl : public CompilerCallbacks {
CHECK(method_inliner_map != nullptr);
}
- virtual ~CompilerCallbacksImpl() { }
+ ~CompilerCallbacksImpl() { }
- virtual bool MethodVerified(verifier::MethodVerifier* verifier)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool MethodVerified(verifier::MethodVerifier* verifier)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE {
bool result = verification_results_->ProcessVerifiedMethod(verifier);
if (result) {
MethodReference ref = verifier->GetMethodReference();
@@ -45,7 +45,7 @@ class CompilerCallbacksImpl : public CompilerCallbacks {
}
return result;
}
- virtual void ClassRejected(ClassReference ref) {
+ void ClassRejected(ClassReference ref) OVERRIDE {
verification_results_->AddRejectedClass(ref);
}
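
FINAL and OVERRIDE are ART's portability macros for C++11 final and override; they are defined locally below so the sketch is self-contained, and the class names are invented for illustration. The point of the change above is that a signature mismatch or an accidental subclass becomes a compile error rather than a silent new virtual.

    // Illustration only: FINAL/OVERRIDE stand in for ART's base/macros.h definitions.
    #define FINAL final
    #define OVERRIDE override

    class Callbacks {
     public:
      virtual ~Callbacks() {}
      virtual bool MethodVerified(int verifier_id) = 0;
    };

    class CallbacksImpl FINAL : public Callbacks {
     public:
      // OK: the signature matches the base declaration.
      bool MethodVerified(int verifier_id) OVERRIDE { return verifier_id != 0; }
      // Compile error if uncommented: no base method with this signature to override.
      // bool MethodVerified(long verifier_id) OVERRIDE { return false; }
    };
    // class Derived : public CallbacksImpl {};  // Compile error: the base is final.
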
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
new file mode 100644
index 0000000000..d401398ca4
--- /dev/null
+++ b/compiler/driver/compiler_driver-inl.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DRIVER_COMPILER_DRIVER_INL_H_
+#define ART_COMPILER_DRIVER_COMPILER_DRIVER_INL_H_
+
+#include "compiler_driver.h"
+#include "dex/compiler_ir.h"
+#include "mirror/art_field.h"
+#include "mirror/art_field-inl.h"
+#include "mirror/class_loader.h"
+#include "mirror/dex_cache.h"
+#include "mirror/art_field-inl.h"
+#include "scoped_thread_state_change.h"
+
+namespace art {
+
+inline mirror::DexCache* CompilerDriver::GetDexCache(const DexCompilationUnit* mUnit) {
+ return mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile());
+}
+
+inline mirror::ClassLoader* CompilerDriver::GetClassLoader(ScopedObjectAccess& soa,
+ const DexCompilationUnit* mUnit) {
+ return soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader());
+}
+
+inline mirror::Class* CompilerDriver::ResolveCompilingMethodsClass(
+ ScopedObjectAccess& soa, const SirtRef<mirror::DexCache>& dex_cache,
+ const SirtRef<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit) {
+ DCHECK(dex_cache->GetDexFile() == mUnit->GetDexFile());
+ DCHECK(class_loader.get() == soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
+ const DexFile::MethodId& referrer_method_id =
+ mUnit->GetDexFile()->GetMethodId(mUnit->GetDexMethodIndex());
+ mirror::Class* referrer_class = mUnit->GetClassLinker()->ResolveType(
+ *mUnit->GetDexFile(), referrer_method_id.class_idx_, dex_cache, class_loader);
+ DCHECK_EQ(referrer_class == nullptr, soa.Self()->IsExceptionPending());
+ if (UNLIKELY(referrer_class == nullptr)) {
+ // Clean up any exception left by type resolution.
+ soa.Self()->ClearException();
+ }
+ return referrer_class;
+}
+
+inline mirror::ArtField* CompilerDriver::ResolveField(
+ ScopedObjectAccess& soa, const SirtRef<mirror::DexCache>& dex_cache,
+ const SirtRef<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
+ uint32_t field_idx, bool is_static) {
+ DCHECK(dex_cache->GetDexFile() == mUnit->GetDexFile());
+ DCHECK(class_loader.get() == soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
+ mirror::ArtField* resolved_field = mUnit->GetClassLinker()->ResolveField(
+ *mUnit->GetDexFile(), field_idx, dex_cache, class_loader, is_static);
+ DCHECK_EQ(resolved_field == nullptr, soa.Self()->IsExceptionPending());
+ if (UNLIKELY(resolved_field == nullptr)) {
+ // Clean up any exception left by type resolution.
+ soa.Self()->ClearException();
+ return nullptr;
+ }
+ if (UNLIKELY(resolved_field->IsStatic() != is_static)) {
+ // ClassLinker can return a field of the wrong kind directly from the DexCache.
+ // Silently return nullptr on such incompatible class change.
+ return nullptr;
+ }
+ return resolved_field;
+}
+
+inline void CompilerDriver::GetResolvedFieldDexFileLocation(
+ mirror::ArtField* resolved_field, const DexFile** declaring_dex_file,
+ uint16_t* declaring_class_idx, uint16_t* declaring_field_idx) {
+ mirror::Class* declaring_class = resolved_field->GetDeclaringClass();
+ *declaring_dex_file = declaring_class->GetDexCache()->GetDexFile();
+ *declaring_class_idx = declaring_class->GetDexTypeIndex();
+ *declaring_field_idx = resolved_field->GetDexFieldIndex();
+}
+
+inline bool CompilerDriver::IsFieldVolatile(mirror::ArtField* field) {
+ return field->IsVolatile();
+}
+
+inline std::pair<bool, bool> CompilerDriver::IsFastInstanceField(
+ mirror::DexCache* dex_cache, mirror::Class* referrer_class,
+ mirror::ArtField* resolved_field, uint16_t field_idx, MemberOffset* field_offset) {
+ DCHECK(!resolved_field->IsStatic());
+ mirror::Class* fields_class = resolved_field->GetDeclaringClass();
+ bool fast_get = referrer_class != nullptr &&
+ referrer_class->CanAccessResolvedField(fields_class, resolved_field,
+ dex_cache, field_idx);
+ bool fast_put = fast_get && (!resolved_field->IsFinal() || fields_class == referrer_class);
+ *field_offset = fast_get ? resolved_field->GetOffset() : MemberOffset(0u);
+ return std::make_pair(fast_get, fast_put);
+}
+
+inline std::pair<bool, bool> CompilerDriver::IsFastStaticField(
+ mirror::DexCache* dex_cache, mirror::Class* referrer_class,
+ mirror::ArtField* resolved_field, uint16_t field_idx, MemberOffset* field_offset,
+ uint32_t* storage_index, bool* is_referrers_class, bool* is_initialized) {
+ DCHECK(resolved_field->IsStatic());
+ if (LIKELY(referrer_class != nullptr)) {
+ mirror::Class* fields_class = resolved_field->GetDeclaringClass();
+ if (fields_class == referrer_class) {
+ *field_offset = resolved_field->GetOffset();
+ *storage_index = fields_class->GetDexTypeIndex();
+ *is_referrers_class = true; // implies no need to worry about class initialization
+ *is_initialized = true;
+ return std::make_pair(true, true);
+ }
+ if (referrer_class->CanAccessResolvedField(fields_class, resolved_field,
+ dex_cache, field_idx)) {
+ // We have the resolved field; we must make it into an index for the referrer
+ // in its static storage (which may fail if it doesn't have a slot for it).
+ // TODO: for images we can elide the static storage base null check
+ // if we know there's a non-null entry in the image
+ const DexFile* dex_file = dex_cache->GetDexFile();
+ uint32_t storage_idx = DexFile::kDexNoIndex;
+ if (LIKELY(fields_class->GetDexCache() == dex_cache)) {
+ // Common case where the referrer and the field share the same dex cache,
+ // so there is no need to search the dex file.
+ storage_idx = fields_class->GetDexTypeIndex();
+ } else {
+ // Search the dex file for a localized ssb index; this may fail if the field's class
+ // is a parent of the class mentioned in the dex file and there is no dex cache entry.
+ const DexFile::StringId* string_id =
+ dex_file->FindStringId(FieldHelper(resolved_field).GetDeclaringClassDescriptor());
+ if (string_id != nullptr) {
+ const DexFile::TypeId* type_id =
+ dex_file->FindTypeId(dex_file->GetIndexForStringId(*string_id));
+ if (type_id != nullptr) {
+ // Medium path: needs a check that the static storage base is initialized.
+ storage_idx = dex_file->GetIndexForTypeId(*type_id);
+ }
+ }
+ }
+ if (storage_idx != DexFile::kDexNoIndex) {
+ *field_offset = resolved_field->GetOffset();
+ *storage_index = storage_idx;
+ *is_referrers_class = false;
+ *is_initialized = fields_class->IsInitialized() &&
+ CanAssumeTypeIsPresentInDexCache(*dex_file, storage_idx);
+ return std::make_pair(true, !resolved_field->IsFinal());
+ }
+ }
+ }
+ // Conservative defaults.
+ *field_offset = MemberOffset(0u);
+ *storage_index = DexFile::kDexNoIndex;
+ *is_referrers_class = false;
+ *is_initialized = false;
+ return std::make_pair(false, false);
+}
+
+} // namespace art
+
+#endif // ART_COMPILER_DRIVER_COMPILER_DRIVER_INL_H_
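
The IsFast*Field helpers return a std::pair<fast_get, fast_put> rather than a single flag because a read may be fast-pathable while a write is not, e.g. a final field accessed from a different class. A simplified sketch of how a caller selects the relevant half, mirroring the `result = is_put ? fast_path.second : fast_path.first` pattern in compiler_driver.cc below; the bool parameters are stand-ins for the real ART types, not the actual API.

    // Sketch: folding the (fast_get, fast_put) pair into one go/no-go decision.
    #include <utility>

    static std::pair<bool, bool> IsFastInstanceField(bool accessible, bool is_final,
                                                     bool same_class) {
      bool fast_get = accessible;
      // Writes to a final field are only fast from the declaring class itself.
      bool fast_put = fast_get && (!is_final || same_class);
      return std::make_pair(fast_get, fast_put);
    }

    static bool CanFastPath(bool is_put, bool accessible, bool is_final, bool same_class) {
      std::pair<bool, bool> fast_path = IsFastInstanceField(accessible, is_final, same_class);
      return is_put ? fast_path.second : fast_path.first;  // Writes use the stricter half.
    }

    // Example: a final field in another class reads fast but stores via the slow path:
    // CanFastPath(false, true, true, false) == true;
    // CanFastPath(true, true, true, false) == false.
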
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 1b284de9cc..fc22addbf1 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -26,11 +26,13 @@
#include "base/timing_logger.h"
#include "class_linker.h"
#include "compiler_backend.h"
+#include "compiler_driver-inl.h"
#include "dex_compilation_unit.h"
#include "dex_file-inl.h"
#include "dex/verification_results.h"
#include "dex/verified_method.h"
#include "dex/quick/dex_file_method_inliner.h"
+#include "driver/compiler_options.h"
#include "jni_internal.h"
#include "object_utils.h"
#include "runtime.h"
@@ -323,10 +325,12 @@ CompilerDriver::CompilerDriver(const CompilerOptions* compiler_options,
compiler_enable_auto_elf_loading_(NULL),
compiler_get_method_code_addr_(NULL),
support_boot_image_fixup_(instruction_set != kMips),
+ cfi_info_(nullptr),
dedupe_code_("dedupe code"),
dedupe_mapping_table_("dedupe mapping table"),
dedupe_vmap_table_("dedupe vmap table"),
- dedupe_gc_map_("dedupe gc map") {
+ dedupe_gc_map_("dedupe gc map"),
+ dedupe_cfi_info_("dedupe cfi info") {
DCHECK(compiler_options_ != nullptr);
DCHECK(verification_results_ != nullptr);
DCHECK(method_inliner_map_ != nullptr);
@@ -341,6 +345,11 @@ CompilerDriver::CompilerDriver(const CompilerOptions* compiler_options,
if (!image_) {
CHECK(image_classes_.get() == NULL);
}
+
+ // Are we generating CFI information?
+ if (compiler_options->GetGenerateGDBInformation()) {
+ cfi_info_.reset(compiler_backend_->GetCallFrameInformationInitialization(*this));
+ }
}
std::vector<uint8_t>* CompilerDriver::DeduplicateCode(const std::vector<uint8_t>& code) {
@@ -359,6 +368,13 @@ std::vector<uint8_t>* CompilerDriver::DeduplicateGCMap(const std::vector<uint8_t
return dedupe_gc_map_.Add(Thread::Current(), code);
}
+std::vector<uint8_t>* CompilerDriver::DeduplicateCFIInfo(const std::vector<uint8_t>* cfi_info) {
+ if (cfi_info == nullptr) {
+ return nullptr;
+ }
+ return dedupe_cfi_info_.Add(Thread::Current(), *cfi_info);
+}
+
CompilerDriver::~CompilerDriver() {
Thread* self = Thread::Current();
{
@@ -424,6 +440,11 @@ const std::vector<uint8_t>* CompilerDriver::CreatePortableToInterpreterBridge()
PORTABLE_ENTRYPOINT_OFFSET(pPortableToInterpreterBridge));
}
+const std::vector<uint8_t>* CompilerDriver::CreateQuickGenericJniTrampoline() const {
+ return CreateTrampoline(instruction_set_, kQuickAbi,
+ QUICK_ENTRYPOINT_OFFSET(pQuickGenericJniTrampoline));
+}
+
const std::vector<uint8_t>* CompilerDriver::CreateQuickImtConflictTrampoline() const {
return CreateTrampoline(instruction_set_, kQuickAbi,
QUICK_ENTRYPOINT_OFFSET(pQuickImtConflictTrampoline));
@@ -441,11 +462,11 @@ const std::vector<uint8_t>* CompilerDriver::CreateQuickToInterpreterBridge() con
void CompilerDriver::CompileAll(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
- TimingLogger& timings) {
+ TimingLogger* timings) {
DCHECK(!Runtime::Current()->IsStarted());
UniquePtr<ThreadPool> thread_pool(new ThreadPool("Compiler driver thread pool", thread_count_ - 1));
- PreCompile(class_loader, dex_files, *thread_pool.get(), timings);
- Compile(class_loader, dex_files, *thread_pool.get(), timings);
+ PreCompile(class_loader, dex_files, thread_pool.get(), timings);
+ Compile(class_loader, dex_files, thread_pool.get(), timings);
if (dump_stats_) {
stats_->Dump();
}
@@ -483,7 +504,7 @@ static DexToDexCompilationLevel GetDexToDexCompilationlevel(
}
}
-void CompilerDriver::CompileOne(mirror::ArtMethod* method, TimingLogger& timings) {
+void CompilerDriver::CompileOne(mirror::ArtMethod* method, TimingLogger* timings) {
DCHECK(!Runtime::Current()->IsStarted());
Thread* self = Thread::Current();
jobject jclass_loader;
@@ -510,7 +531,7 @@ void CompilerDriver::CompileOne(mirror::ArtMethod* method, TimingLogger& timings
dex_files.push_back(dex_file);
UniquePtr<ThreadPool> thread_pool(new ThreadPool("Compiler driver thread pool", 0U));
- PreCompile(jclass_loader, dex_files, *thread_pool.get(), timings);
+ PreCompile(jclass_loader, dex_files, thread_pool.get(), timings);
// Can we run DEX-to-DEX compiler on this class ?
DexToDexCompilationLevel dex_to_dex_compilation_level = kDontDexToDexCompile;
@@ -531,7 +552,7 @@ void CompilerDriver::CompileOne(mirror::ArtMethod* method, TimingLogger& timings
}
void CompilerDriver::Resolve(jobject class_loader, const std::vector<const DexFile*>& dex_files,
- ThreadPool& thread_pool, TimingLogger& timings) {
+ ThreadPool* thread_pool, TimingLogger* timings) {
for (size_t i = 0; i != dex_files.size(); ++i) {
const DexFile* dex_file = dex_files[i];
CHECK(dex_file != NULL);
@@ -540,7 +561,7 @@ void CompilerDriver::Resolve(jobject class_loader, const std::vector<const DexFi
}
void CompilerDriver::PreCompile(jobject class_loader, const std::vector<const DexFile*>& dex_files,
- ThreadPool& thread_pool, TimingLogger& timings) {
+ ThreadPool* thread_pool, TimingLogger* timings) {
LoadImageClasses(timings);
Resolve(class_loader, dex_files, thread_pool, timings);
@@ -625,13 +646,13 @@ static bool RecordImageClassesVisitor(mirror::Class* klass, void* arg)
}
// Make a list of descriptors for classes to include in the image
-void CompilerDriver::LoadImageClasses(TimingLogger& timings)
+void CompilerDriver::LoadImageClasses(TimingLogger* timings)
LOCKS_EXCLUDED(Locks::mutator_lock_) {
if (!IsImage()) {
return;
}
- timings.NewSplit("LoadImageClasses");
+ timings->NewSplit("LoadImageClasses");
// Make a first pass to load all classes explicitly listed in the file
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
@@ -713,9 +734,9 @@ void CompilerDriver::FindClinitImageClassesCallback(mirror::Object* object, void
MaybeAddToImageClasses(object->GetClass(), compiler_driver->image_classes_.get());
}
-void CompilerDriver::UpdateImageClasses(TimingLogger& timings) {
+void CompilerDriver::UpdateImageClasses(TimingLogger* timings) {
if (IsImage()) {
- timings.NewSplit("UpdateImageClasses");
+ timings->NewSplit("UpdateImageClasses");
// Update image_classes_ with classes for objects created by <clinit> methods.
Thread* self = Thread::Current();
@@ -886,6 +907,24 @@ bool CompilerDriver::CanEmbedTypeInCode(const DexFile& dex_file, uint32_t type_i
}
}
+void CompilerDriver::ProcessedInstanceField(bool resolved) {
+ if (!resolved) {
+ stats_->UnresolvedInstanceField();
+ } else {
+ stats_->ResolvedInstanceField();
+ }
+}
+
+void CompilerDriver::ProcessedStaticField(bool resolved, bool local) {
+ if (!resolved) {
+ stats_->UnresolvedStaticField();
+ } else if (local) {
+ stats_->ResolvedLocalStaticField();
+ } else {
+ stats_->ResolvedStaticField();
+ }
+}
+
static mirror::Class* ComputeCompilingMethodsClass(ScopedObjectAccess& soa,
SirtRef<mirror::DexCache>& dex_cache,
const DexCompilationUnit* mUnit)
@@ -903,15 +942,6 @@ static mirror::Class* ComputeCompilingMethodsClass(ScopedObjectAccess& soa,
dex_cache, class_loader);
}
-static mirror::ArtField* ComputeFieldReferencedFromCompilingMethod(
- ScopedObjectAccess& soa, const DexCompilationUnit* mUnit, uint32_t field_idx, bool is_static)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- SirtRef<mirror::DexCache> dex_cache(soa.Self(), mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()));
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
- return mUnit->GetClassLinker()->ResolveField(*mUnit->GetDexFile(), field_idx, dex_cache,
- class_loader, is_static);
-}
-
static mirror::ArtMethod* ComputeMethodReferencedFromCompilingMethod(ScopedObjectAccess& soa,
const DexCompilationUnit* mUnit,
uint32_t method_idx,
@@ -947,117 +977,80 @@ bool CompilerDriver::ComputeSpecialAccessorInfo(uint32_t field_idx, bool is_put,
}
bool CompilerDriver::ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit,
- bool is_put, int* field_offset, bool* is_volatile) {
+ bool is_put, MemberOffset* field_offset,
+ bool* is_volatile) {
ScopedObjectAccess soa(Thread::Current());
- // Conservative defaults.
- *field_offset = -1;
- *is_volatile = true;
- // Try to resolve field and ignore if an Incompatible Class Change Error (ie is static).
- mirror::ArtField* resolved_field =
- ComputeFieldReferencedFromCompilingMethod(soa, mUnit, field_idx, false);
- if (resolved_field != NULL && !resolved_field->IsStatic()) {
- SirtRef<mirror::DexCache> dex_cache(soa.Self(),
- resolved_field->GetDeclaringClass()->GetDexCache());
- mirror::Class* referrer_class =
- ComputeCompilingMethodsClass(soa, dex_cache, mUnit);
- if (referrer_class != NULL) {
- mirror::Class* fields_class = resolved_field->GetDeclaringClass();
- bool access_ok = referrer_class->CanAccessResolvedField(fields_class, resolved_field,
- dex_cache.get(), field_idx);
- bool is_write_to_final_from_wrong_class = is_put && resolved_field->IsFinal() &&
- fields_class != referrer_class;
- if (access_ok && !is_write_to_final_from_wrong_class) {
- *field_offset = resolved_field->GetOffset().Int32Value();
- *is_volatile = resolved_field->IsVolatile();
- stats_->ResolvedInstanceField();
- return true; // Fast path.
- }
- }
- }
- // Clean up any exception left by field/type resolution
- if (soa.Self()->IsExceptionPending()) {
- soa.Self()->ClearException();
+ // Try to resolve the field and compiling method's class.
+ mirror::ArtField* resolved_field;
+ mirror::Class* referrer_class;
+ mirror::DexCache* dex_cache;
+ {
+ SirtRef<mirror::DexCache> dex_cache_sirt(soa.Self(),
+ mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()));
+ SirtRef<mirror::ClassLoader> class_loader_sirt(soa.Self(),
+ soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
+ SirtRef<mirror::ArtField> resolved_field_sirt(soa.Self(),
+ ResolveField(soa, dex_cache_sirt, class_loader_sirt, mUnit, field_idx, false));
+ referrer_class = (resolved_field_sirt.get() != nullptr)
+ ? ResolveCompilingMethodsClass(soa, dex_cache_sirt, class_loader_sirt, mUnit) : nullptr;
+ resolved_field = resolved_field_sirt.get();
+ dex_cache = dex_cache_sirt.get();
}
- stats_->UnresolvedInstanceField();
- return false; // Incomplete knowledge needs slow path.
+ bool result = false;
+ if (resolved_field != nullptr && referrer_class != nullptr) {
+ *is_volatile = IsFieldVolatile(resolved_field);
+ std::pair<bool, bool> fast_path = IsFastInstanceField(
+ dex_cache, referrer_class, resolved_field, field_idx, field_offset);
+ result = is_put ? fast_path.second : fast_path.first;
+ }
+ if (!result) {
+ // Conservative defaults.
+ *is_volatile = true;
+ *field_offset = MemberOffset(static_cast<size_t>(-1));
+ }
+ ProcessedInstanceField(result);
+ return result;
}
bool CompilerDriver::ComputeStaticFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit,
- bool is_put, int* field_offset, int* storage_index,
- bool* is_referrers_class, bool* is_volatile,
- bool* is_initialized) {
+ bool is_put, MemberOffset* field_offset,
+ uint32_t* storage_index, bool* is_referrers_class,
+ bool* is_volatile, bool* is_initialized) {
ScopedObjectAccess soa(Thread::Current());
- // Conservative defaults.
- *field_offset = -1;
- *storage_index = -1;
- *is_referrers_class = false;
- *is_volatile = true;
- *is_initialized = false;
- // Try to resolve field and ignore if an Incompatible Class Change Error (ie isn't static).
- mirror::ArtField* resolved_field =
- ComputeFieldReferencedFromCompilingMethod(soa, mUnit, field_idx, true);
- if (resolved_field != NULL && resolved_field->IsStatic()) {
- SirtRef<mirror::DexCache> dex_cache(soa.Self(), resolved_field->GetDeclaringClass()->GetDexCache());
- mirror::Class* referrer_class =
- ComputeCompilingMethodsClass(soa, dex_cache, mUnit);
- if (referrer_class != NULL) {
- mirror::Class* fields_class = resolved_field->GetDeclaringClass();
- if (fields_class == referrer_class) {
- *is_referrers_class = true; // implies no worrying about class initialization
- *is_initialized = true;
- *field_offset = resolved_field->GetOffset().Int32Value();
- *is_volatile = resolved_field->IsVolatile();
- stats_->ResolvedLocalStaticField();
- return true; // fast path
- } else {
- bool access_ok = referrer_class->CanAccessResolvedField(fields_class, resolved_field,
- dex_cache.get(), field_idx);
- bool is_write_to_final_from_wrong_class = is_put && resolved_field->IsFinal();
- if (access_ok && !is_write_to_final_from_wrong_class) {
- // We have the resolved field, we must make it into a index for the referrer
- // in its static storage (which may fail if it doesn't have a slot for it)
- // TODO: for images we can elide the static storage base null check
- // if we know there's a non-null entry in the image
- mirror::DexCache* dex_cache = mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile());
- if (fields_class->GetDexCache() == dex_cache) {
- // common case where the dex cache of both the referrer and the field are the same,
- // no need to search the dex file
- *storage_index = fields_class->GetDexTypeIndex();
- *field_offset = resolved_field->GetOffset().Int32Value();
- *is_volatile = resolved_field->IsVolatile();
- *is_initialized = fields_class->IsInitialized() &&
- CanAssumeTypeIsPresentInDexCache(*mUnit->GetDexFile(), *storage_index);
- stats_->ResolvedStaticField();
- return true;
- }
- // Search dex file for localized ssb index, may fail if field's class is a parent
- // of the class mentioned in the dex file and there is no dex cache entry.
- const DexFile::StringId* string_id =
- mUnit->GetDexFile()->FindStringId(FieldHelper(resolved_field).GetDeclaringClassDescriptor());
- if (string_id != NULL) {
- const DexFile::TypeId* type_id =
- mUnit->GetDexFile()->FindTypeId(mUnit->GetDexFile()->GetIndexForStringId(*string_id));
- if (type_id != NULL) {
- // medium path, needs check of static storage base being initialized
- *storage_index = mUnit->GetDexFile()->GetIndexForTypeId(*type_id);
- *field_offset = resolved_field->GetOffset().Int32Value();
- *is_volatile = resolved_field->IsVolatile();
- *is_initialized = fields_class->IsInitialized() &&
- CanAssumeTypeIsPresentInDexCache(*mUnit->GetDexFile(), *storage_index);
- stats_->ResolvedStaticField();
- return true;
- }
- }
- }
- }
- }
- }
- // Clean up any exception left by field/type resolution
- if (soa.Self()->IsExceptionPending()) {
- soa.Self()->ClearException();
+ // Try to resolve the field and compiling method's class.
+ mirror::ArtField* resolved_field;
+ mirror::Class* referrer_class;
+ mirror::DexCache* dex_cache;
+ {
+ SirtRef<mirror::DexCache> dex_cache_sirt(soa.Self(),
+ mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()));
+ SirtRef<mirror::ClassLoader> class_loader_sirt(soa.Self(),
+ soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
+ SirtRef<mirror::ArtField> resolved_field_sirt(soa.Self(),
+ ResolveField(soa, dex_cache_sirt, class_loader_sirt, mUnit, field_idx, true));
+ referrer_class = (resolved_field_sirt.get() != nullptr)
+ ? ResolveCompilingMethodsClass(soa, dex_cache_sirt, class_loader_sirt, mUnit) : nullptr;
+ resolved_field = resolved_field_sirt.get();
+ dex_cache = dex_cache_sirt.get();
}
- stats_->UnresolvedStaticField();
- return false; // Incomplete knowledge needs slow path.
+ bool result = false;
+ if (resolved_field != nullptr && referrer_class != nullptr) {
+ *is_volatile = IsFieldVolatile(resolved_field);
+ std::pair<bool, bool> fast_path = IsFastStaticField(
+ dex_cache, referrer_class, resolved_field, field_idx, field_offset,
+ storage_index, is_referrers_class, is_initialized);
+ result = is_put ? fast_path.second : fast_path.first;
+ }
+ if (!result) {
+ // Conservative defaults.
+ *is_volatile = true;
+ *field_offset = MemberOffset(static_cast<size_t>(-1));
+ *storage_index = -1;
+ *is_referrers_class = false;
+ *is_initialized = false;
+ }
+ ProcessedStaticField(result, *is_referrers_class);
+ return result;
}
void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType sharp_type,
@@ -1368,13 +1361,13 @@ class ParallelCompilationManager {
jobject class_loader,
CompilerDriver* compiler,
const DexFile* dex_file,
- ThreadPool& thread_pool)
+ ThreadPool* thread_pool)
: index_(0),
class_linker_(class_linker),
class_loader_(class_loader),
compiler_(compiler),
dex_file_(dex_file),
- thread_pool_(&thread_pool) {}
+ thread_pool_(thread_pool) {}
ClassLinker* GetClassLinker() const {
CHECK(class_linker_ != NULL);
@@ -1628,7 +1621,7 @@ static void ResolveType(const ParallelCompilationManager* manager, size_t type_i
}
void CompilerDriver::ResolveDexFile(jobject class_loader, const DexFile& dex_file,
- ThreadPool& thread_pool, TimingLogger& timings) {
+ ThreadPool* thread_pool, TimingLogger* timings) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
// TODO: we could resolve strings here, although the string table is largely filled with class
@@ -1638,16 +1631,16 @@ void CompilerDriver::ResolveDexFile(jobject class_loader, const DexFile& dex_fil
if (IsImage()) {
// For images we resolve all types, such as array, whereas for applications just those with
// classdefs are resolved by ResolveClassFieldsAndMethods.
- timings.NewSplit("Resolve Types");
+ timings->NewSplit("Resolve Types");
context.ForAll(0, dex_file.NumTypeIds(), ResolveType, thread_count_);
}
- timings.NewSplit("Resolve MethodsAndFields");
+ timings->NewSplit("Resolve MethodsAndFields");
context.ForAll(0, dex_file.NumClassDefs(), ResolveClassFieldsAndMethods, thread_count_);
}
void CompilerDriver::Verify(jobject class_loader, const std::vector<const DexFile*>& dex_files,
- ThreadPool& thread_pool, TimingLogger& timings) {
+ ThreadPool* thread_pool, TimingLogger* timings) {
for (size_t i = 0; i != dex_files.size(); ++i) {
const DexFile* dex_file = dex_files[i];
CHECK(dex_file != NULL);
@@ -1702,8 +1695,8 @@ static void VerifyClass(const ParallelCompilationManager* manager, size_t class_
}
void CompilerDriver::VerifyDexFile(jobject class_loader, const DexFile& dex_file,
- ThreadPool& thread_pool, TimingLogger& timings) {
- timings.NewSplit("Verify Dex File");
+ ThreadPool* thread_pool, TimingLogger* timings) {
+ timings->NewSplit("Verify Dex File");
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
ParallelCompilationManager context(class_linker, class_loader, this, &dex_file, thread_pool);
context.ForAll(0, dex_file.NumClassDefs(), VerifyClass, thread_count_);
@@ -1805,8 +1798,8 @@ static void InitializeClass(const ParallelCompilationManager* manager, size_t cl
}
void CompilerDriver::InitializeClasses(jobject jni_class_loader, const DexFile& dex_file,
- ThreadPool& thread_pool, TimingLogger& timings) {
- timings.NewSplit("InitializeNoClinit");
+ ThreadPool* thread_pool, TimingLogger* timings) {
+ timings->NewSplit("InitializeNoClinit");
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
ParallelCompilationManager context(class_linker, jni_class_loader, this, &dex_file, thread_pool);
size_t thread_count;
@@ -1825,7 +1818,7 @@ void CompilerDriver::InitializeClasses(jobject jni_class_loader, const DexFile&
void CompilerDriver::InitializeClasses(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
- ThreadPool& thread_pool, TimingLogger& timings) {
+ ThreadPool* thread_pool, TimingLogger* timings) {
for (size_t i = 0; i != dex_files.size(); ++i) {
const DexFile* dex_file = dex_files[i];
CHECK(dex_file != NULL);
@@ -1834,7 +1827,7 @@ void CompilerDriver::InitializeClasses(jobject class_loader,
}
void CompilerDriver::Compile(jobject class_loader, const std::vector<const DexFile*>& dex_files,
- ThreadPool& thread_pool, TimingLogger& timings) {
+ ThreadPool* thread_pool, TimingLogger* timings) {
for (size_t i = 0; i != dex_files.size(); ++i) {
const DexFile* dex_file = dex_files[i];
CHECK(dex_file != NULL);
@@ -1916,8 +1909,8 @@ void CompilerDriver::CompileClass(const ParallelCompilationManager* manager, siz
}
void CompilerDriver::CompileDexFile(jobject class_loader, const DexFile& dex_file,
- ThreadPool& thread_pool, TimingLogger& timings) {
- timings.NewSplit("Compile Dex File");
+ ThreadPool* thread_pool, TimingLogger* timings) {
+ timings->NewSplit("Compile Dex File");
ParallelCompilationManager context(Runtime::Current()->GetClassLinker(), class_loader, this,
&dex_file, thread_pool);
context.ForAll(0, dex_file.NumClassDefs(), CompilerDriver::CompileClass, thread_count_);
@@ -1932,8 +1925,12 @@ void CompilerDriver::CompileMethod(const DexFile::CodeItem* code_item, uint32_t
uint64_t start_ns = NanoTime();
if ((access_flags & kAccNative) != 0) {
+#if defined(__x86_64__)
+ // Leaving this empty will trigger the generic JNI version.
+#else
compiled_method = compiler_backend_->JniCompile(*this, access_flags, method_idx, dex_file);
CHECK(compiled_method != NULL);
+#endif
} else if ((access_flags & kAccAbstract) != 0) {
} else {
MethodReference method_ref(&dex_file, method_idx);
@@ -2037,38 +2034,38 @@ bool CompilerDriver::RequiresConstructorBarrier(Thread* self, const DexFile* dex
bool CompilerDriver::WriteElf(const std::string& android_root,
bool is_host,
const std::vector<const art::DexFile*>& dex_files,
- OatWriter& oat_writer,
+ OatWriter* oat_writer,
art::File* file)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return compiler_backend_->WriteElf(file, oat_writer, dex_files, android_root, is_host, *this);
}
void CompilerDriver::InstructionSetToLLVMTarget(InstructionSet instruction_set,
- std::string& target_triple,
- std::string& target_cpu,
- std::string& target_attr) {
+ std::string* target_triple,
+ std::string* target_cpu,
+ std::string* target_attr) {
switch (instruction_set) {
case kThumb2:
- target_triple = "thumb-none-linux-gnueabi";
- target_cpu = "cortex-a9";
- target_attr = "+thumb2,+neon,+neonfp,+vfp3,+db";
+ *target_triple = "thumb-none-linux-gnueabi";
+ *target_cpu = "cortex-a9";
+ *target_attr = "+thumb2,+neon,+neonfp,+vfp3,+db";
break;
case kArm:
- target_triple = "armv7-none-linux-gnueabi";
+ *target_triple = "armv7-none-linux-gnueabi";
// TODO: Fix for Nexus S.
- target_cpu = "cortex-a9";
+ *target_cpu = "cortex-a9";
// TODO: Fix for Xoom.
- target_attr = "+v7,+neon,+neonfp,+vfp3,+db";
+ *target_attr = "+v7,+neon,+neonfp,+vfp3,+db";
break;
case kX86:
- target_triple = "i386-pc-linux-gnu";
- target_attr = "";
+ *target_triple = "i386-pc-linux-gnu";
+ *target_attr = "";
break;
case kMips:
- target_triple = "mipsel-unknown-linux";
- target_attr = "mips32r2";
+ *target_triple = "mipsel-unknown-linux";
+ *target_attr = "mips32r2";
break;
default:
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 377eb6fa34..80a6796a4e 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -48,9 +48,10 @@ class AOTCompilationStats;
class CompilerOptions;
class DexCompilationUnit;
class DexFileToMethodInlinerMap;
-class InlineIGetIPutData;
+struct InlineIGetIPutData;
class OatWriter;
class ParallelCompilationManager;
+class ScopedObjectAccess;
class TimingLogger;
class VerificationResults;
class VerifiedMethod;
@@ -108,11 +109,11 @@ class CompilerDriver {
~CompilerDriver();
void CompileAll(jobject class_loader, const std::vector<const DexFile*>& dex_files,
- TimingLogger& timings)
+ TimingLogger* timings)
LOCKS_EXCLUDED(Locks::mutator_lock_);
// Compile a single Method.
- void CompileOne(mirror::ArtMethod* method, TimingLogger& timings)
+ void CompileOne(mirror::ArtMethod* method, TimingLogger* timings)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
VerificationResults* GetVerificationResults() const {
@@ -123,16 +124,15 @@ class CompilerDriver {
return method_inliner_map_;
}
- const InstructionSet& GetInstructionSet() const {
+ InstructionSet GetInstructionSet() const {
return instruction_set_;
}
- const InstructionSetFeatures& GetInstructionSetFeatures() const {
+ InstructionSetFeatures GetInstructionSetFeatures() const {
return instruction_set_features_;
}
const CompilerOptions& GetCompilerOptions() const {
- DCHECK(compiler_options_ != nullptr);
return *compiler_options_;
}
@@ -164,6 +164,8 @@ class CompilerDriver {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const std::vector<uint8_t>* CreatePortableToInterpreterBridge() const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const std::vector<uint8_t>* CreateQuickGenericJniTrampoline() const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const std::vector<uint8_t>* CreateQuickImtConflictTrampoline() const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const std::vector<uint8_t>* CreateQuickResolutionTrampoline() const
@@ -204,6 +206,53 @@ class CompilerDriver {
bool* is_type_initialized, bool* use_direct_type_ptr,
uintptr_t* direct_type_ptr);
+ // Get the DexCache for the compilation unit's dex file.
+ mirror::DexCache* GetDexCache(const DexCompilationUnit* mUnit)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ mirror::ClassLoader* GetClassLoader(ScopedObjectAccess& soa, const DexCompilationUnit* mUnit)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Resolve compiling method's class. Returns nullptr on failure.
+ mirror::Class* ResolveCompilingMethodsClass(
+ ScopedObjectAccess& soa, const SirtRef<mirror::DexCache>& dex_cache,
+ const SirtRef<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Resolve a field. Returns nullptr on failure, including incompatible class change.
+ // NOTE: Unlike ClassLinker's ResolveField(), this method enforces is_static.
+ mirror::ArtField* ResolveField(
+ ScopedObjectAccess& soa, const SirtRef<mirror::DexCache>& dex_cache,
+ const SirtRef<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
+ uint32_t field_idx, bool is_static)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Get declaration location of a resolved field.
+ void GetResolvedFieldDexFileLocation(
+ mirror::ArtField* resolved_field, const DexFile** declaring_dex_file,
+ uint16_t* declaring_class_idx, uint16_t* declaring_field_idx)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ bool IsFieldVolatile(mirror::ArtField* field) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Can we fast-path an IGET/IPUT access to an instance field? If yes, compute the field offset.
+ std::pair<bool, bool> IsFastInstanceField(
+ mirror::DexCache* dex_cache, mirror::Class* referrer_class,
+ mirror::ArtField* resolved_field, uint16_t field_idx, MemberOffset* field_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Can we fast-path an SGET/SPUT access to a static field? If yes, compute the field offset,
+ // the type index of the declaring class in the referrer's dex file and whether the declaring
+ // class is the referrer's class or at least can be assumed to be initialized.
+ std::pair<bool, bool> IsFastStaticField(
+ mirror::DexCache* dex_cache, mirror::Class* referrer_class,
+ mirror::ArtField* resolved_field, uint16_t field_idx, MemberOffset* field_offset,
+ uint32_t* storage_index, bool* is_referrers_class, bool* is_initialized)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void ProcessedInstanceField(bool resolved);
+ void ProcessedStaticField(bool resolved, bool local);
+
// Can we fast path instance field access in a verified accessor?
// If yes, computes field's offset and volatility and whether the method is static or not.
static bool ComputeSpecialAccessorInfo(uint32_t field_idx, bool is_put,
@@ -213,13 +262,13 @@ class CompilerDriver {
// Can we fast path instance field access? Computes field's offset and volatility.
bool ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit, bool is_put,
- int* field_offset, bool* is_volatile)
+ MemberOffset* field_offset, bool* is_volatile)
LOCKS_EXCLUDED(Locks::mutator_lock_);
// Can we fast path static field access? Computes field's offset, volatility and whether the
// field is within the referrer (which can avoid checking class initialization).
bool ComputeStaticFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit, bool is_put,
- int* field_offset, int* storage_index,
+ MemberOffset* field_offset, uint32_t* storage_index,
bool* is_referrers_class, bool* is_volatile, bool* is_initialized)
LOCKS_EXCLUDED(Locks::mutator_lock_);
@@ -275,21 +324,21 @@ class CompilerDriver {
support_boot_image_fixup_ = support_boot_image_fixup;
}
- ArenaPool& GetArenaPool() {
- return arena_pool_;
+ ArenaPool* GetArenaPool() {
+ return &arena_pool_;
}
bool WriteElf(const std::string& android_root,
bool is_host,
const std::vector<const DexFile*>& dex_files,
- OatWriter& oat_writer,
+ OatWriter* oat_writer,
File* file);
- // TODO: move to a common home for llvm helpers once quick/portable are merged
+ // TODO: move to a common home for llvm helpers once quick/portable are merged.
static void InstructionSetToLLVMTarget(InstructionSet instruction_set,
- std::string& target_triple,
- std::string& target_cpu,
- std::string& target_attr);
+ std::string* target_triple,
+ std::string* target_cpu,
+ std::string* target_attr);
void SetCompilerContext(void* compiler_context) {
compiler_context_ = compiler_context;
@@ -310,8 +359,8 @@ class CompilerDriver {
return dump_passes_;
}
- CumulativeLogger& GetTimingsLogger() const {
- return *timings_logger_;
+ CumulativeLogger* GetTimingsLogger() const {
+ return timings_logger_;
}
class PatchInformation {
@@ -494,6 +543,15 @@ class CompilerDriver {
std::vector<uint8_t>* DeduplicateMappingTable(const std::vector<uint8_t>& code);
std::vector<uint8_t>* DeduplicateVMapTable(const std::vector<uint8_t>& code);
std::vector<uint8_t>* DeduplicateGCMap(const std::vector<uint8_t>& code);
+ std::vector<uint8_t>* DeduplicateCFIInfo(const std::vector<uint8_t>* cfi_info);
+
+ /*
+ * @brief Return the pointer to the Call Frame Information.
+ * @return Pointer to the call frame information for this compilation.
+ */
+ std::vector<uint8_t>* GetCallFrameInformation() const {
+ return cfi_info_.get();
+ }
private:
// Compute constant code and method pointers when possible
@@ -507,43 +565,42 @@ class CompilerDriver {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void PreCompile(jobject class_loader, const std::vector<const DexFile*>& dex_files,
- ThreadPool& thread_pool, TimingLogger& timings)
+ ThreadPool* thread_pool, TimingLogger* timings)
LOCKS_EXCLUDED(Locks::mutator_lock_);
- void LoadImageClasses(TimingLogger& timings);
+ void LoadImageClasses(TimingLogger* timings);
// Attempt to resolve all types, methods, fields, and strings
// referenced from code in the dex file following PathClassLoader
// ordering semantics.
void Resolve(jobject class_loader, const std::vector<const DexFile*>& dex_files,
- ThreadPool& thread_pool, TimingLogger& timings)
+ ThreadPool* thread_pool, TimingLogger* timings)
LOCKS_EXCLUDED(Locks::mutator_lock_);
void ResolveDexFile(jobject class_loader, const DexFile& dex_file,
- ThreadPool& thread_pool, TimingLogger& timings)
+ ThreadPool* thread_pool, TimingLogger* timings)
LOCKS_EXCLUDED(Locks::mutator_lock_);
void Verify(jobject class_loader, const std::vector<const DexFile*>& dex_files,
- ThreadPool& thread_pool, TimingLogger& timings);
+ ThreadPool* thread_pool, TimingLogger* timings);
void VerifyDexFile(jobject class_loader, const DexFile& dex_file,
- ThreadPool& thread_pool, TimingLogger& timings)
+ ThreadPool* thread_pool, TimingLogger* timings)
LOCKS_EXCLUDED(Locks::mutator_lock_);
void InitializeClasses(jobject class_loader, const std::vector<const DexFile*>& dex_files,
- ThreadPool& thread_pool, TimingLogger& timings)
+ ThreadPool* thread_pool, TimingLogger* timings)
LOCKS_EXCLUDED(Locks::mutator_lock_);
void InitializeClasses(jobject class_loader, const DexFile& dex_file,
- ThreadPool& thread_pool, TimingLogger& timings)
+ ThreadPool* thread_pool, TimingLogger* timings)
LOCKS_EXCLUDED(Locks::mutator_lock_, compiled_classes_lock_);
- void UpdateImageClasses(TimingLogger& timings)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ void UpdateImageClasses(TimingLogger* timings) LOCKS_EXCLUDED(Locks::mutator_lock_);
static void FindClinitImageClassesCallback(mirror::Object* object, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void Compile(jobject class_loader, const std::vector<const DexFile*>& dex_files,
- ThreadPool& thread_pool, TimingLogger& timings);
+ ThreadPool* thread_pool, TimingLogger* timings);
void CompileDexFile(jobject class_loader, const DexFile& dex_file,
- ThreadPool& thread_pool, TimingLogger& timings)
+ ThreadPool* thread_pool, TimingLogger* timings)
LOCKS_EXCLUDED(Locks::mutator_lock_);
void CompileMethod(const DexFile::CodeItem* code_item, uint32_t access_flags,
InvokeType invoke_type, uint16_t class_def_idx, uint32_t method_idx,
@@ -627,6 +684,9 @@ class CompilerDriver {
bool support_boot_image_fixup_;
+ // Call Frame Information, which might be generated to help stack tracebacks.
+ UniquePtr<std::vector<uint8_t> > cfi_info_;
+
// DeDuplication data structures, these own the corresponding byte arrays.
class DedupeHashFunc {
public:
@@ -665,6 +725,7 @@ class CompilerDriver {
DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc, 4> dedupe_mapping_table_;
DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc, 4> dedupe_vmap_table_;
DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc, 4> dedupe_gc_map_;
+ DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc, 4> dedupe_cfi_info_;
DISALLOW_COPY_AND_ASSIGN(CompilerDriver);
};
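The new IsFastInstanceField() returns a std::pair<bool, bool>. A minimal sketch of how a caller might unpack it, assuming the pair encodes (fast IGET, fast IPUT); the flag names below are illustrative, not the actual MIR field-info API:

    #include <cstdint>
    #include <utility>

    struct FieldFlags {
      static constexpr uint16_t kFlagFastGet = 1u << 0;  // IGET can be fast-pathed.
      static constexpr uint16_t kFlagFastPut = 1u << 1;  // IPUT can be fast-pathed.
      uint16_t bits = 0;
    };

    void RecordFastPath(const std::pair<bool, bool>& fast_path, FieldFlags* flags) {
      flags->bits |= fast_path.first ? FieldFlags::kFlagFastGet : 0u;
      flags->bits |= fast_path.second ? FieldFlags::kFlagFastPut : 0u;
    }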
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index 34806ce293..2b3af6281f 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -40,7 +40,7 @@ class CompilerDriverTest : public CommonCompilerTest {
timings.StartSplit("CompileAll");
compiler_driver_->CompileAll(class_loader,
Runtime::Current()->GetCompileTimeClassPath(class_loader),
- timings);
+ &timings);
MakeAllExecutable(class_loader);
timings.EndSplit();
}
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 9f6745b015..39738ab049 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -43,7 +43,8 @@ class CompilerOptions {
large_method_threshold_(kDefaultLargeMethodThreshold),
small_method_threshold_(kDefaultSmallMethodThreshold),
tiny_method_threshold_(kDefaultTinyMethodThreshold),
- num_dex_methods_threshold_(kDefaultNumDexMethodsThreshold)
+ num_dex_methods_threshold_(kDefaultNumDexMethodsThreshold),
+ generate_gdb_information_(false)
#ifdef ART_SEA_IR_MODE
, sea_ir_mode_(false)
#endif
@@ -54,7 +55,8 @@ class CompilerOptions {
size_t large_method_threshold,
size_t small_method_threshold,
size_t tiny_method_threshold,
- size_t num_dex_methods_threshold
+ size_t num_dex_methods_threshold,
+ bool generate_gdb_information
#ifdef ART_SEA_IR_MODE
, bool sea_ir_mode
#endif
@@ -64,7 +66,8 @@ class CompilerOptions {
large_method_threshold_(large_method_threshold),
small_method_threshold_(small_method_threshold),
tiny_method_threshold_(tiny_method_threshold),
- num_dex_methods_threshold_(num_dex_methods_threshold)
+ num_dex_methods_threshold_(num_dex_methods_threshold),
+ generate_gdb_information_(generate_gdb_information)
#ifdef ART_SEA_IR_MODE
, sea_ir_mode_(sea_ir_mode)
#endif
@@ -118,6 +121,10 @@ class CompilerOptions {
bool GetSeaIrMode();
#endif
+ bool GetGenerateGDBInformation() const {
+ return generate_gdb_information_;
+ }
+
private:
CompilerFilter compiler_filter_;
size_t huge_method_threshold_;
@@ -125,6 +132,7 @@ class CompilerOptions {
size_t small_method_threshold_;
size_t tiny_method_threshold_;
size_t num_dex_methods_threshold_;
+ bool generate_gdb_information_;
#ifdef ART_SEA_IR_MODE
bool sea_ir_mode_;
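The new generate_gdb_information flag defaults to false and is read only through GetGenerateGDBInformation(). A hedged sketch of the consumer side; Options is a stand-in for CompilerOptions, and the dex2oat switch that would set the flag is outside this diff:

    // Sketch: gating CFI generation on the new option.
    struct Options {
      bool generate_gdb_information = false;
      bool GetGenerateGDBInformation() const { return generate_gdb_information; }
    };

    void ConfigureDriver(const Options& options) {
      if (options.GetGenerateGDBInformation()) {
        // e.g. allocate the driver's cfi_info_ buffer so per-method FDEs
        // can be appended during compilation.
      }
    }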
diff --git a/compiler/elf_writer.cc b/compiler/elf_writer.cc
index 6db3fa1a11..ccc26a1baf 100644
--- a/compiler/elf_writer.cc
+++ b/compiler/elf_writer.cc
@@ -30,12 +30,7 @@
namespace art {
-ElfWriter::ElfWriter(const CompilerDriver& driver, File* elf_file)
- : compiler_driver_(&driver), elf_file_(elf_file) {}
-
-ElfWriter::~ElfWriter() {}
-
-Elf32_Addr ElfWriter::GetOatDataAddress(ElfFile* elf_file) {
+uint32_t ElfWriter::GetOatDataAddress(ElfFile* elf_file) {
Elf32_Addr oatdata_address = elf_file->FindSymbolAddress(SHT_DYNSYM,
"oatdata",
false);
diff --git a/compiler/elf_writer.h b/compiler/elf_writer.h
index 99dfc56d49..3610d1a8b2 100644
--- a/compiler/elf_writer.h
+++ b/compiler/elf_writer.h
@@ -23,7 +23,6 @@
#include <vector>
#include "base/macros.h"
-#include "elf_utils.h"
#include "os.h"
namespace art {
@@ -42,21 +41,23 @@ class ElfWriter {
size_t& oat_data_offset);
// Returns the oat_data runtime address for an opened ElfFile.
- static Elf32_Addr GetOatDataAddress(ElfFile* elf_file);
+ static uint32_t GetOatDataAddress(ElfFile* elf_file);
protected:
- ElfWriter(const CompilerDriver& driver, File* elf_file);
- virtual ~ElfWriter();
+ ElfWriter(const CompilerDriver& driver, File* elf_file)
+ : compiler_driver_(&driver), elf_file_(elf_file) {
+ }
- virtual bool Write(OatWriter& oat_writer,
+ virtual ~ElfWriter() {}
+
+ virtual bool Write(OatWriter* oat_writer,
const std::vector<const DexFile*>& dex_files,
const std::string& android_root,
bool is_host)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
- // Setup by constructor
- const CompilerDriver* compiler_driver_;
- File* elf_file_;
+ const CompilerDriver* const compiler_driver_;
+ File* const elf_file_;
};
} // namespace art
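Several signatures in this patch (WriteElf, InstructionSetToLLVMTarget, ElfWriter::Write, the ThreadPool/TimingLogger parameters) switch mutable references to pointers so that mutation is visible at the call site. A minimal sketch of the convention, with a placeholder triple string:

    #include <string>

    // Before: nothing at the call site shows that target_triple is written.
    void FillTargetTriple(std::string& target_triple) { target_triple = "example-triple"; }

    // After: out-parameters become pointers, so callers must pass &arg.
    void FillTargetTripleOut(std::string* target_triple) { *target_triple = "example-triple"; }

    void Caller() {
      std::string triple;
      FillTargetTripleOut(&triple);  // The explicit & flags the mutation.
    }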
diff --git a/compiler/elf_writer_mclinker.cc b/compiler/elf_writer_mclinker.cc
index c7baf4f3e5..b2d3a69e74 100644
--- a/compiler/elf_writer_mclinker.cc
+++ b/compiler/elf_writer_mclinker.cc
@@ -16,6 +16,7 @@
#include "elf_writer_mclinker.h"
+#include <llvm/Support/ELF.h>
#include <llvm/Support/TargetSelect.h>
#include <mcld/Environment.h>
@@ -32,7 +33,6 @@
#include "class_linker.h"
#include "dex_method_iterator.h"
#include "driver/compiler_driver.h"
-#include "elf_file.h"
#include "globals.h"
#include "mirror/art_method.h"
#include "mirror/art_method-inl.h"
@@ -44,12 +44,14 @@
namespace art {
ElfWriterMclinker::ElfWriterMclinker(const CompilerDriver& driver, File* elf_file)
- : ElfWriter(driver, elf_file), oat_input_(NULL) {}
+ : ElfWriter(driver, elf_file), oat_input_(nullptr) {
+}
-ElfWriterMclinker::~ElfWriterMclinker() {}
+ElfWriterMclinker::~ElfWriterMclinker() {
+}
bool ElfWriterMclinker::Create(File* elf_file,
- OatWriter& oat_writer,
+ OatWriter* oat_writer,
const std::vector<const DexFile*>& dex_files,
const std::string& android_root,
bool is_host,
@@ -58,29 +60,29 @@ bool ElfWriterMclinker::Create(File* elf_file,
return elf_writer.Write(oat_writer, dex_files, android_root, is_host);
}
-bool ElfWriterMclinker::Write(OatWriter& oat_writer,
+bool ElfWriterMclinker::Write(OatWriter* oat_writer,
const std::vector<const DexFile*>& dex_files,
const std::string& android_root,
bool is_host) {
std::vector<uint8_t> oat_contents;
- oat_contents.reserve(oat_writer.GetSize());
+ oat_contents.reserve(oat_writer->GetSize());
VectorOutputStream output_stream("oat contents", oat_contents);
- CHECK(oat_writer.Write(output_stream));
- CHECK_EQ(oat_writer.GetSize(), oat_contents.size());
+ CHECK(oat_writer->Write(&output_stream));
+ CHECK_EQ(oat_writer->GetSize(), oat_contents.size());
Init();
AddOatInput(oat_contents);
-#if defined(ART_USE_PORTABLE_COMPILER)
- AddMethodInputs(dex_files);
- AddRuntimeInputs(android_root, is_host);
-#endif
+ if (kUsePortableCompiler) {
+ AddMethodInputs(dex_files);
+ AddRuntimeInputs(android_root, is_host);
+ }
if (!Link()) {
return false;
}
oat_contents.clear();
-#if defined(ART_USE_PORTABLE_COMPILER)
- FixupOatMethodOffsets(dex_files);
-#endif
+ if (kUsePortableCompiler) {
+ FixupOatMethodOffsets(dex_files);
+ }
return true;
}
@@ -100,9 +102,9 @@ void ElfWriterMclinker::Init() {
std::string target_cpu;
std::string target_attr;
CompilerDriver::InstructionSetToLLVMTarget(compiler_driver_->GetInstructionSet(),
- target_triple,
- target_cpu,
- target_attr);
+ &target_triple,
+ &target_cpu,
+ &target_attr);
// Based on mclinker's llvm-mcld.cpp main() and LinkerTest
//
@@ -236,7 +238,6 @@ void ElfWriterMclinker::AddOatInput(std::vector<uint8_t>& oat_contents) {
text_section);
}
-#if defined(ART_USE_PORTABLE_COMPILER)
void ElfWriterMclinker::AddMethodInputs(const std::vector<const DexFile*>& dex_files) {
DCHECK(oat_input_ != NULL);
@@ -320,7 +321,6 @@ void ElfWriterMclinker::AddRuntimeInputs(const std::string& android_root, bool i
mcld::Input* libm_lib_input_input = ir_builder_->ReadInput(libm_lib, libm_lib);
CHECK(libm_lib_input_input != NULL);
}
-#endif
bool ElfWriterMclinker::Link() {
// link inputs
@@ -345,7 +345,6 @@ bool ElfWriterMclinker::Link() {
return true;
}
-#if defined(ART_USE_PORTABLE_COMPILER)
void ElfWriterMclinker::FixupOatMethodOffsets(const std::vector<const DexFile*>& dex_files) {
std::string error_msg;
UniquePtr<ElfFile> elf_file(ElfFile::Open(elf_file_, true, false, &error_msg));
@@ -409,6 +408,5 @@ uint32_t ElfWriterMclinker::FixupCompiledCodeOffset(ElfFile& elf_file,
}
return compiled_code_offset;
}
-#endif
} // namespace art
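Replacing the #if defined(ART_USE_PORTABLE_COMPILER) guards with if (kUsePortableCompiler) keeps both branches compiled and type-checked in every build, while the optimizer strips the dead one. A sketch, assuming kUsePortableCompiler is a constant derived from the old define:

    #if defined(ART_USE_PORTABLE_COMPILER)
    static constexpr bool kUsePortableCompiler = true;
    #else
    static constexpr bool kUsePortableCompiler = false;
    #endif

    void AddInputs() {
      if (kUsePortableCompiler) {
        // Always parsed and type-checked; emitted only when the constant is true.
      }
    }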
diff --git a/compiler/elf_writer_mclinker.h b/compiler/elf_writer_mclinker.h
index 8ee7231f79..13757edecd 100644
--- a/compiler/elf_writer_mclinker.h
+++ b/compiler/elf_writer_mclinker.h
@@ -37,11 +37,11 @@ namespace art {
class CompiledCode;
-class ElfWriterMclinker : public ElfWriter {
+class ElfWriterMclinker FINAL : public ElfWriter {
public:
// Write an ELF file. Returns true on success, false on failure.
static bool Create(File* file,
- OatWriter& oat_writer,
+ OatWriter* oat_writer,
const std::vector<const DexFile*>& dex_files,
const std::string& android_root,
bool is_host,
@@ -49,10 +49,11 @@ class ElfWriterMclinker : public ElfWriter {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
protected:
- virtual bool Write(OatWriter& oat_writer,
- const std::vector<const DexFile*>& dex_files,
- const std::string& android_root,
- bool is_host)
+ bool Write(OatWriter* oat_writer,
+ const std::vector<const DexFile*>& dex_files,
+ const std::string& android_root,
+ bool is_host)
+ OVERRIDE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
@@ -65,13 +66,11 @@ class ElfWriterMclinker : public ElfWriter {
void AddCompiledCodeInput(const CompiledCode& compiled_code);
void AddRuntimeInputs(const std::string& android_root, bool is_host);
bool Link();
-#if defined(ART_USE_PORTABLE_COMPILER)
void FixupOatMethodOffsets(const std::vector<const DexFile*>& dex_files)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
uint32_t FixupCompiledCodeOffset(ElfFile& elf_file,
- ::llvm::ELF::Elf32_Addr oatdata_address,
+ uint32_t oatdata_address,
const CompiledCode& compiled_code);
-#endif
// Setup by Init()
UniquePtr<mcld::LinkerConfig> linker_config_;
diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc
index 3191374160..4b823ef5ec 100644
--- a/compiler/elf_writer_quick.cc
+++ b/compiler/elf_writer_quick.cc
@@ -29,13 +29,8 @@
namespace art {
-ElfWriterQuick::ElfWriterQuick(const CompilerDriver& driver, File* elf_file)
- : ElfWriter(driver, elf_file) {}
-
-ElfWriterQuick::~ElfWriterQuick() {}
-
bool ElfWriterQuick::Create(File* elf_file,
- OatWriter& oat_writer,
+ OatWriter* oat_writer,
const std::vector<const DexFile*>& dex_files,
const std::string& android_root,
bool is_host,
@@ -44,7 +39,7 @@ bool ElfWriterQuick::Create(File* elf_file,
return elf_writer.Write(oat_writer, dex_files, android_root, is_host);
}
-bool ElfWriterQuick::Write(OatWriter& oat_writer,
+bool ElfWriterQuick::Write(OatWriter* oat_writer,
const std::vector<const DexFile*>& dex_files_unused,
const std::string& android_root_unused,
bool is_host_unused) {
@@ -103,6 +98,7 @@ bool ElfWriterQuick::Write(OatWriter& oat_writer,
// | .rodata\0 |
// | .text\0 |
// | .shstrtab\0 |
+ // | .debug_info\0 |
+ // | .debug_abbrev\0 |
+ // | .debug_str\0 |
+ // | .debug_frame\0 |
// +-------------------------+
// | Elf32_Shdr NULL |
// | Elf32_Shdr .dynsym |
@@ -112,6 +108,9 @@ bool ElfWriterQuick::Write(OatWriter& oat_writer,
// | Elf32_Shdr .rodata |
// | Elf32_Shdr .dynamic |
// | Elf32_Shdr .shstrtab |
+ // | Elf32_Shdr .debug_info | (Optional)
+ // | Elf32_Shdr .debug_abbrev| (Optional)
+ // | Elf32_Shdr .debug_frame | (Optional)
+ // | Elf32_Shdr .debug_str | (Optional)
// +-------------------------+
// phase 1: computing offsets
@@ -197,7 +196,7 @@ bool ElfWriterQuick::Write(OatWriter& oat_writer,
// .rodata
uint32_t oat_data_alignment = kPageSize;
uint32_t oat_data_offset = expected_offset = RoundUp(expected_offset, oat_data_alignment);
- const OatHeader& oat_header = oat_writer.GetOatHeader();
+ const OatHeader& oat_header = oat_writer->GetOatHeader();
CHECK(oat_header.IsValid());
uint32_t oat_data_size = oat_header.GetExecutableOffset();
expected_offset += oat_data_size;
@@ -210,9 +209,9 @@ bool ElfWriterQuick::Write(OatWriter& oat_writer,
uint32_t oat_exec_alignment = kPageSize;
CHECK_ALIGNED(expected_offset, kPageSize);
uint32_t oat_exec_offset = expected_offset = RoundUp(expected_offset, oat_exec_alignment);
- uint32_t oat_exec_size = oat_writer.GetSize() - oat_data_size;
+ uint32_t oat_exec_size = oat_writer->GetSize() - oat_data_size;
expected_offset += oat_exec_size;
- CHECK_EQ(oat_data_offset + oat_writer.GetSize(), expected_offset);
+ CHECK_EQ(oat_data_offset + oat_writer->GetSize(), expected_offset);
if (debug) {
LOG(INFO) << "oat_exec_offset=" << oat_exec_offset << std::hex << " " << oat_exec_offset;
LOG(INFO) << "oat_exec_size=" << oat_exec_size << std::hex << " " << oat_exec_size;
@@ -264,6 +263,18 @@ bool ElfWriterQuick::Write(OatWriter& oat_writer,
uint32_t shstrtab_shstrtab_offset = shstrtab.size();
shstrtab += ".shstrtab";
shstrtab += '\0';
+ uint32_t shstrtab_debug_info_offset = shstrtab.size();
+ shstrtab += ".debug_info";
+ shstrtab += '\0';
+ uint32_t shstrtab_debug_abbrev_offset = shstrtab.size();
+ shstrtab += ".debug_abbrev";
+ shstrtab += '\0';
+ uint32_t shstrtab_debug_str_offset = shstrtab.size();
+ shstrtab += ".debug_str";
+ shstrtab += '\0';
+ uint32_t shstrtab_debug_frame_offset = shstrtab.size();
+ shstrtab += ".debug_frame";
+ shstrtab += '\0';
uint32_t shstrtab_size = shstrtab.size();
expected_offset += shstrtab_size;
if (debug) {
@@ -271,6 +282,52 @@ bool ElfWriterQuick::Write(OatWriter& oat_writer,
LOG(INFO) << "shstrtab_size=" << shstrtab_size << std::hex << " " << shstrtab_size;
}
+ // Create debug information, if we have it.
+ bool generateDebugInformation = compiler_driver_->GetCallFrameInformation() != nullptr;
+ std::vector<uint8_t> dbg_info;
+ std::vector<uint8_t> dbg_abbrev;
+ std::vector<uint8_t> dbg_str;
+ if (generateDebugInformation) {
+ FillInCFIInformation(oat_writer, &dbg_info, &dbg_abbrev, &dbg_str);
+ }
+
+ uint32_t shdbg_info_alignment = 1;
+ uint32_t shdbg_info_offset = expected_offset;
+ uint32_t shdbg_info_size = dbg_info.size();
+ expected_offset += shdbg_info_size;
+ if (debug) {
+ LOG(INFO) << "shdbg_info_offset=" << shdbg_info_offset << std::hex << " " << shdbg_info_offset;
+ LOG(INFO) << "shdbg_info_size=" << shdbg_info_size << std::hex << " " << shdbg_info_size;
+ }
+
+ uint32_t shdbg_abbrev_alignment = 1;
+ uint32_t shdbg_abbrev_offset = expected_offset;
+ uint32_t shdbg_abbrev_size = dbg_abbrev.size();
+ expected_offset += shdbg_abbrev_size;
+ if (debug) {
+ LOG(INFO) << "shdbg_abbrev_offset=" << shdbg_abbrev_offset << std::hex << " " << shdbg_abbrev_offset;
+ LOG(INFO) << "shdbg_abbrev_size=" << shdbg_abbrev_size << std::hex << " " << shdbg_abbrev_size;
+ }
+
+ uint32_t shdbg_frm_alignment = 4;
+ uint32_t shdbg_frm_offset = expected_offset = RoundUp(expected_offset, shdbg_frm_alignment);
+ uint32_t shdbg_frm_size =
+ generateDebugInformation ? compiler_driver_->GetCallFrameInformation()->size() : 0;
+ expected_offset += shdbg_frm_size;
+ if (debug) {
+ LOG(INFO) << "shdbg_frm_offset=" << shdbg_frm_offset << std::hex << " " << shdbg_frm_offset;
+ LOG(INFO) << "shdbg_frm_size=" << shdbg_frm_size << std::hex << " " << shdbg_frm_size;
+ }
+
+ uint32_t shdbg_str_alignment = 1;
+ uint32_t shdbg_str_offset = expected_offset;
+ uint32_t shdbg_str_size = dbg_str.size();
+ expected_offset += shdbg_str_size;
+ if (debug) {
+ LOG(INFO) << "shdbg_str_offset=" << shdbg_str_offset << std::hex << " " << shdbg_str_offset;
+ LOG(INFO) << "shdbg_str_size=" << shdbg_str_size << std::hex << " " << shdbg_str_size;
+ }
+
// section headers (after all sections)
uint32_t shdr_alignment = sizeof(Elf32_Word);
uint32_t shdr_offset = expected_offset = RoundUp(expected_offset, shdr_alignment);
@@ -282,7 +339,11 @@ bool ElfWriterQuick::Write(OatWriter& oat_writer,
const uint8_t SH_TEXT = 5;
const uint8_t SH_DYNAMIC = 6;
const uint8_t SH_SHSTRTAB = 7;
- const uint8_t SH_NUM = 8;
+ const uint8_t SH_DBG_INFO = 8;
+ const uint8_t SH_DBG_ABRV = 9;
+ const uint8_t SH_DBG_FRM = 10;
+ const uint8_t SH_DBG_STR = 11;
+ const uint8_t SH_NUM = generateDebugInformation ? 12 : 8;
uint32_t shdr_size = sizeof(Elf32_Shdr) * SH_NUM;
expected_offset += shdr_size;
if (debug) {
@@ -559,6 +620,52 @@ bool ElfWriterQuick::Write(OatWriter& oat_writer,
section_headers[SH_SHSTRTAB].sh_addralign = shstrtab_alignment;
section_headers[SH_SHSTRTAB].sh_entsize = 0;
+ if (generateDebugInformation) {
+ section_headers[SH_DBG_INFO].sh_name = shstrtab_debug_info_offset;
+ section_headers[SH_DBG_INFO].sh_type = SHT_PROGBITS;
+ section_headers[SH_DBG_INFO].sh_flags = 0;
+ section_headers[SH_DBG_INFO].sh_addr = 0;
+ section_headers[SH_DBG_INFO].sh_offset = shdbg_info_offset;
+ section_headers[SH_DBG_INFO].sh_size = shdbg_info_size;
+ section_headers[SH_DBG_INFO].sh_link = 0;
+ section_headers[SH_DBG_INFO].sh_info = 0;
+ section_headers[SH_DBG_INFO].sh_addralign = shdbg_info_alignment;
+ section_headers[SH_DBG_INFO].sh_entsize = 0;
+
+ section_headers[SH_DBG_ABRV].sh_name = shstrtab_debug_abbrev_offset;
+ section_headers[SH_DBG_ABRV].sh_type = SHT_PROGBITS;
+ section_headers[SH_DBG_ABRV].sh_flags = 0;
+ section_headers[SH_DBG_ABRV].sh_addr = 0;
+ section_headers[SH_DBG_ABRV].sh_offset = shdbg_abbrev_offset;
+ section_headers[SH_DBG_ABRV].sh_size = shdbg_abbrev_size;
+ section_headers[SH_DBG_ABRV].sh_link = 0;
+ section_headers[SH_DBG_ABRV].sh_info = 0;
+ section_headers[SH_DBG_ABRV].sh_addralign = shdbg_abbrev_alignment;
+ section_headers[SH_DBG_ABRV].sh_entsize = 0;
+
+ section_headers[SH_DBG_FRM].sh_name = shstrtab_debug_frame_offset;
+ section_headers[SH_DBG_FRM].sh_type = SHT_PROGBITS;
+ section_headers[SH_DBG_FRM].sh_flags = 0;
+ section_headers[SH_DBG_FRM].sh_addr = 0;
+ section_headers[SH_DBG_FRM].sh_offset = shdbg_frm_offset;
+ section_headers[SH_DBG_FRM].sh_size = shdbg_frm_size;
+ section_headers[SH_DBG_FRM].sh_link = 0;
+ section_headers[SH_DBG_FRM].sh_info = 0;
+ section_headers[SH_DBG_FRM].sh_addralign = shdbg_frm_alignment;
+ section_headers[SH_DBG_FRM].sh_entsize = 0;
+
+ section_headers[SH_DBG_STR].sh_name = shstrtab_debug_str_offset;
+ section_headers[SH_DBG_STR].sh_type = SHT_PROGBITS;
+ section_headers[SH_DBG_STR].sh_flags = 0;
+ section_headers[SH_DBG_STR].sh_addr = 0;
+ section_headers[SH_DBG_STR].sh_offset = shdbg_str_offset;
+ section_headers[SH_DBG_STR].sh_size = shdbg_str_size;
+ section_headers[SH_DBG_STR].sh_link = 0;
+ section_headers[SH_DBG_STR].sh_info = 0;
+ section_headers[SH_DBG_STR].sh_addralign = shdbg_str_alignment;
+ section_headers[SH_DBG_STR].sh_entsize = 0;
+ }
+
// phase 3: writing file
// Elf32_Ehdr
@@ -622,13 +729,13 @@ bool ElfWriterQuick::Write(OatWriter& oat_writer,
return false;
}
BufferedOutputStream output_stream(new FileOutputStream(elf_file_));
- if (!oat_writer.Write(output_stream)) {
+ if (!oat_writer->Write(&output_stream)) {
PLOG(ERROR) << "Failed to write .rodata and .text for " << elf_file_->GetPath();
return false;
}
// .dynamic
- DCHECK_LE(oat_data_offset + oat_writer.GetSize(), dynamic_offset);
+ DCHECK_LE(oat_data_offset + oat_writer->GetSize(), dynamic_offset);
if (static_cast<off_t>(dynamic_offset) != lseek(elf_file_->Fd(), dynamic_offset, SEEK_SET)) {
PLOG(ERROR) << "Failed to seek to .dynamic offset " << dynamic_offset
<< " for " << elf_file_->GetPath();
@@ -651,8 +758,62 @@ bool ElfWriterQuick::Write(OatWriter& oat_writer,
return false;
}
+ if (generateDebugInformation) {
+ // .debug_info
+ DCHECK_LE(shstrtab_offset + shstrtab_size, shdbg_info_offset);
+ if (static_cast<off_t>(shdbg_info_offset) != lseek(elf_file_->Fd(), shdbg_info_offset, SEEK_SET)) {
+ PLOG(ERROR) << "Failed to seek to .shdbg_info offset " << shdbg_info_offset
+ << " for " << elf_file_->GetPath();
+ return false;
+ }
+ if (!elf_file_->WriteFully(&dbg_info[0], shdbg_info_size)) {
+ PLOG(ERROR) << "Failed to write .debug_info for " << elf_file_->GetPath();
+ return false;
+ }
+
+ // .debug_abbrev
+ DCHECK_LE(shdbg_info_offset + shdbg_info_size, shdbg_abbrev_offset);
+ if (static_cast<off_t>(shdbg_abbrev_offset) != lseek(elf_file_->Fd(), shdbg_abbrev_offset, SEEK_SET)) {
+ PLOG(ERROR) << "Failed to seek to .shdbg_abbrev offset " << shdbg_abbrev_offset
+ << " for " << elf_file_->GetPath();
+ return false;
+ }
+ if (!elf_file_->WriteFully(&dbg_abbrev[0], shdbg_abbrev_size)) {
+ PLOG(ERROR) << "Failed to write .debug_abbrev for " << elf_file_->GetPath();
+ return false;
+ }
+
+ // .debug_frame
+ DCHECK_LE(shdbg_abbrev_offset + shdbg_abbrev_size, shdbg_frm_offset);
+ if (static_cast<off_t>(shdbg_frm_offset) != lseek(elf_file_->Fd(), shdbg_frm_offset, SEEK_SET)) {
+ PLOG(ERROR) << "Failed to seek to .shdbg_frm offset " << shdbg_frm_offset
+ << " for " << elf_file_->GetPath();
+ return false;
+ }
+ if (!elf_file_->WriteFully(&((*compiler_driver_->GetCallFrameInformation())[0]), shdbg_frm_size)) {
+ PLOG(ERROR) << "Failed to write .debug_frame for " << elf_file_->GetPath();
+ return false;
+ }
+
+ // .debug_str
+ DCHECK_LE(shdbg_frm_offset + shdbg_frm_size, shdbg_str_offset);
+ if (static_cast<off_t>(shdbg_str_offset) != lseek(elf_file_->Fd(), shdbg_str_offset, SEEK_SET)) {
+ PLOG(ERROR) << "Failed to seek to .shdbg_str offset " << shdbg_str_offset
+ << " for " << elf_file_->GetPath();
+ return false;
+ }
+ if (!elf_file_->WriteFully(&dbg_str[0], shdbg_str_size)) {
+ PLOG(ERROR) << "Failed to write .debug_frame for " << elf_file_->GetPath();
+ return false;
+ }
+ }
+
// section headers (after all sections)
- DCHECK_LE(shstrtab_offset + shstrtab_size, shdr_offset);
+ if (generateDebugInformation) {
+ DCHECK_LE(shdbg_str_offset + shdbg_str_size, shdr_offset);
+ } else {
+ DCHECK_LE(shstrtab_offset + shstrtab_size, shdr_offset);
+ }
if (static_cast<off_t>(shdr_offset) != lseek(elf_file_->Fd(), shdr_offset, SEEK_SET)) {
PLOG(ERROR) << "Failed to seek to ELF section headers offset " << shdr_offset
<< " for " << elf_file_->GetPath();
@@ -665,6 +826,164 @@ bool ElfWriterQuick::Write(OatWriter& oat_writer,
VLOG(compiler) << "ELF file written successfully: " << elf_file_->GetPath();
return true;
+} // NOLINT(readability/fn_size)
+
+static void UpdateWord(std::vector<uint8_t>* buf, int offset, int data) {
+ (*buf)[offset+0] = data;
+ (*buf)[offset+1] = data >> 8;
+ (*buf)[offset+2] = data >> 16;
+ (*buf)[offset+3] = data >> 24;
+}
+
+static void PushWord(std::vector<uint8_t>* buf, int data) {
+ buf->push_back(data & 0xff);
+ buf->push_back((data >> 8) & 0xff);
+ buf->push_back((data >> 16) & 0xff);
+ buf->push_back((data >> 24) & 0xff);
+}
+
+static void PushHalf(std::vector<uint8_t>* buf, int data) {
+ buf->push_back(data & 0xff);
+ buf->push_back((data >> 8) & 0xff);
+}
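These helpers emit, and back-patch, little-endian halfwords and words into a byte buffer. A short usage sketch of the back-patch pattern used below for DWARF's unit_length, with a reader to make the byte order explicit (assumes the buffer arrives empty):

    #include <cstdint>
    #include <vector>

    // Little-endian reader mirroring PushWord()/UpdateWord() above.
    static uint32_t ReadWord(const std::vector<uint8_t>& buf, size_t offset) {
      return buf[offset] | (buf[offset + 1] << 8) | (buf[offset + 2] << 16) |
             (static_cast<uint32_t>(buf[offset + 3]) << 24);
    }

    void Demo(std::vector<uint8_t>* buf) {
      PushWord(buf, 0);                     // Reserve the 4-byte unit_length.
      PushHalf(buf, 3);                     // DWARF version 3.
      UpdateWord(buf, 0, buf->size() - 4);  // Back-patch: length excludes the field itself.
      // ReadWord(*buf, 0) now yields 2 (the two version bytes).
    }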
+
+// DWARF constants needed to generate CFI information.
+enum {
+ // Tag encodings.
+ DW_TAG_compile_unit = 0x11,
+ DW_TAG_subprogram = 0x2e,
+
+ // Attribute encodings.
+ DW_AT_name = 0x03,
+ DW_AT_low_pc = 0x11,
+ DW_AT_high_pc = 0x12,
+ DW_AT_language = 0x13,
+
+ // Constant encoding.
+ DW_CHILDREN_no = 0x00,
+ DW_CHILDREN_yes = 0x01,
+
+ // Attribute form encodings.
+ DW_FORM_addr = 0x01,
+ DW_FORM_data1 = 0x0b,
+ DW_FORM_strp = 0x0e,
+
+ // Language encoding.
+ DW_LANG_Java = 0x000b
+};
+
+void ElfWriterQuick::FillInCFIInformation(OatWriter* oat_writer,
+ std::vector<uint8_t>* dbg_info,
+ std::vector<uint8_t>* dbg_abbrev,
+ std::vector<uint8_t>* dbg_str) {
+ // Create the debug_abbrev section with boilerplate information.
+ // We only care about low_pc and high_pc right now for the compilation
+ // unit and methods.
+
+ // Tag 1: Compilation unit: DW_TAG_compile_unit.
+ dbg_abbrev->push_back(1);
+ dbg_abbrev->push_back(DW_TAG_compile_unit);
+
+ // There are children (the methods).
+ dbg_abbrev->push_back(DW_CHILDREN_yes);
+
+ // DW_AT_language DW_FORM_data1.
+ dbg_abbrev->push_back(DW_AT_language);
+ dbg_abbrev->push_back(DW_FORM_data1);
+
+ // DW_AT_low_pc DW_FORM_addr.
+ dbg_abbrev->push_back(DW_AT_low_pc);
+ dbg_abbrev->push_back(DW_FORM_addr);
+
+ // DW_AT_high_pc DW_FORM_addr.
+ dbg_abbrev->push_back(DW_AT_high_pc);
+ dbg_abbrev->push_back(DW_FORM_addr);
+
+ // End of DW_TAG_compile_unit.
+ PushHalf(dbg_abbrev, 0);
+
+ // Tag 2: Subprogram: DW_TAG_subprogram.
+ dbg_abbrev->push_back(2);
+ dbg_abbrev->push_back(DW_TAG_subprogram);
+
+ // There are no children.
+ dbg_abbrev->push_back(DW_CHILDREN_no);
+
+ // Name of the method.
+ dbg_abbrev->push_back(DW_AT_name);
+ dbg_abbrev->push_back(DW_FORM_strp);
+
+ // DW_AT_low_pc DW_FORM_addr.
+ dbg_abbrev->push_back(DW_AT_low_pc);
+ dbg_abbrev->push_back(DW_FORM_addr);
+
+ // DW_AT_high_pc DW_FORM_addr.
+ dbg_abbrev->push_back(DW_AT_high_pc);
+ dbg_abbrev->push_back(DW_FORM_addr);
+
+ // End of DW_TAG_subprogram.
+ PushHalf(dbg_abbrev, 0);
+
+ // Start the debug_info section with the header information.
+ // 'unit_length' will be filled in later.
+ PushWord(dbg_info, 0);
+
+ // 'version' - 3.
+ PushHalf(dbg_info, 3);
+
+ // Offset into .debug_abbrev section (always 0).
+ PushWord(dbg_info, 0);
+
+ // Address size: 4.
+ dbg_info->push_back(4);
+
+ // Start the description for the compilation unit.
+ // This uses tag 1.
+ dbg_info->push_back(1);
+
+ // The language is Java.
+ dbg_info->push_back(DW_LANG_Java);
+
+ // Leave space for low_pc and high_pc.
+ int low_pc_offset = dbg_info->size();
+ PushWord(dbg_info, 0);
+ PushWord(dbg_info, 0);
+
+ // Walk through the information in the method table, and enter it into dbg_info.
+ const std::vector<OatWriter::DebugInfo>& dbg = oat_writer->GetCFIMethodInfo();
+ uint32_t low_pc = 0xFFFFFFFFU;
+ uint32_t high_pc = 0;
+
+ for (uint32_t i = 0; i < dbg.size(); i++) {
+ const OatWriter::DebugInfo& info = dbg[i];
+ if (info.low_pc_ < low_pc) {
+ low_pc = info.low_pc_;
+ }
+ if (info.high_pc_ > high_pc) {
+ high_pc = info.high_pc_;
+ }
+
+ // Start a new TAG: subroutine (2).
+ dbg_info->push_back(2);
+
+ // Enter the name into the string table (and NUL terminate).
+ uint32_t str_offset = dbg_str->size();
+ dbg_str->insert(dbg_str->end(), info.method_name_.begin(), info.method_name_.end());
+ dbg_str->push_back('\0');
+
+ // Enter name, low_pc, high_pc.
+ PushWord(dbg_info, str_offset);
+ PushWord(dbg_info, info.low_pc_);
+ PushWord(dbg_info, info.high_pc_);
+ }
+
+ // One byte terminator.
+ dbg_info->push_back(0);
+
+ // We have now walked all the methods. Fill in the length and low/high PCs.
+ UpdateWord(dbg_info, 0, dbg_info->size() - 4);
+ UpdateWord(dbg_info, low_pc_offset, low_pc);
+ UpdateWord(dbg_info, low_pc_offset + 4, high_pc);
}
} // namespace art
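The .debug_info built by FillInCFIInformation() therefore starts with a DWARF32 version-3 compile-unit header. A small sanity-check sketch of that layout, using the byte offsets produced by the code above:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    static uint32_t ReadWord(const std::vector<uint8_t>& b, size_t off) {
      return b[off] | (b[off + 1] << 8) | (b[off + 2] << 16) |
             (static_cast<uint32_t>(b[off + 3]) << 24);
    }

    void CheckDebugInfoHeader(const std::vector<uint8_t>& dbg_info) {
      assert(ReadWord(dbg_info, 0) == dbg_info.size() - 4);  // unit_length excludes itself.
      assert(dbg_info[4] == 3 && dbg_info[5] == 0);          // version == 3, little-endian half.
      assert(ReadWord(dbg_info, 6) == 0);                    // debug_abbrev_offset == 0.
      assert(dbg_info[10] == 4);                             // address_size == 4.
    }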
diff --git a/compiler/elf_writer_quick.h b/compiler/elf_writer_quick.h
index f36d06f79d..dec75dc83f 100644
--- a/compiler/elf_writer_quick.h
+++ b/compiler/elf_writer_quick.h
@@ -21,11 +21,11 @@
namespace art {
-class ElfWriterQuick : public ElfWriter {
+class ElfWriterQuick FINAL : public ElfWriter {
public:
// Write an ELF file. Returns true on success, false on failure.
static bool Create(File* file,
- OatWriter& oat_writer,
+ OatWriter* oat_writer,
const std::vector<const DexFile*>& dex_files,
const std::string& android_root,
bool is_host,
@@ -33,15 +33,27 @@ class ElfWriterQuick : public ElfWriter {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
protected:
- virtual bool Write(OatWriter& oat_writer,
- const std::vector<const DexFile*>& dex_files,
- const std::string& android_root,
- bool is_host)
+ bool Write(OatWriter* oat_writer,
+ const std::vector<const DexFile*>& dex_files,
+ const std::string& android_root,
+ bool is_host)
+ OVERRIDE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
- ElfWriterQuick(const CompilerDriver& driver, File* elf_file);
- ~ElfWriterQuick();
+ ElfWriterQuick(const CompilerDriver& driver, File* elf_file)
+ : ElfWriter(driver, elf_file) {}
+ ~ElfWriterQuick() {}
+
+ /*
+ * @brief Generate the DWARF debug_info, debug_abbrev, and debug_str sections.
+ * @param oat_writer The Oat file Writer.
+ * @param dbg_info Compilation unit information.
+ * @param dbg_abbrev Abbreviations used to generate dbg_info.
+ * @param dbg_str Debug strings.
+ */
+ void FillInCFIInformation(OatWriter* oat_writer, std::vector<uint8_t>* dbg_info,
+ std::vector<uint8_t>* dbg_abbrev, std::vector<uint8_t>* dbg_str);
DISALLOW_IMPLICIT_CONSTRUCTORS(ElfWriterQuick);
};
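Marking the writers FINAL and their Write() methods OVERRIDE turns signature drift into a compile error; the macros expand to the C++11 keywords. A minimal illustration:

    struct Base {
      virtual ~Base() {}
      virtual bool Write(int* out) = 0;
    };

    struct Derived final : Base {      // No further subclassing allowed.
      bool Write(int* out) override {  // Breaks the build if Base::Write changes shape.
        *out = 0;
        return true;
      }
    };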
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index 16e2aa208a..05d6693f70 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -42,22 +42,24 @@ class ImageTest : public CommonCompilerTest {
};
TEST_F(ImageTest, WriteRead) {
- ScratchFile tmp_elf;
+ // Create a root tmp file, to be the base of the .art and .oat temporary files.
+ ScratchFile tmp;
+ ScratchFile tmp_elf(tmp, "oat");
{
{
jobject class_loader = NULL;
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
TimingLogger timings("ImageTest::WriteRead", false, false);
timings.StartSplit("CompileAll");
-#if defined(ART_USE_PORTABLE_COMPILER)
- // TODO: we disable this for portable so the test executes in a reasonable amount of time.
- // We shouldn't need to do this.
- runtime_->SetCompilerFilter(Runtime::kInterpretOnly);
-#endif
+ if (kUsePortableCompiler) {
+ // TODO: we disable this for portable so the test executes in a reasonable amount of time.
+ // We shouldn't need to do this.
+ compiler_options_->SetCompilerFilter(CompilerOptions::kInterpretOnly);
+ }
for (const DexFile* dex_file : class_linker->GetBootClassPath()) {
dex_file->EnableWrite();
}
- compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), timings);
+ compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), &timings);
ScopedObjectAccess soa(Thread::Current());
OatWriter oat_writer(class_linker->GetBootClassPath(),
@@ -65,7 +67,7 @@ TEST_F(ImageTest, WriteRead) {
bool success = compiler_driver_->WriteElf(GetTestAndroidRoot(),
!kIsTargetBuild,
class_linker->GetBootClassPath(),
- oat_writer,
+ &oat_writer,
tmp_elf.GetFile());
ASSERT_TRUE(success);
timings.EndSplit();
@@ -75,7 +77,7 @@ TEST_F(ImageTest, WriteRead) {
UniquePtr<File> tmp_oat(OS::OpenFileReadWrite(tmp_elf.GetFilename().c_str()));
ASSERT_TRUE(tmp_oat.get() != NULL);
- ScratchFile tmp_image;
+ ScratchFile tmp_image(tmp, "art");
const uintptr_t requested_image_base = ART_BASE_ADDRESS;
{
ImageWriter writer(*compiler_driver_.get());
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index aa16885039..e3114eb80d 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -104,6 +104,8 @@ bool ImageWriter::Write(const std::string& image_filename,
portable_to_interpreter_bridge_offset_ =
oat_file_->GetOatHeader().GetPortableToInterpreterBridgeOffset();
+ quick_generic_jni_trampoline_offset_ =
+ oat_file_->GetOatHeader().GetQuickGenericJniTrampolineOffset();
quick_imt_conflict_trampoline_offset_ =
oat_file_->GetOatHeader().GetQuickImtConflictTrampolineOffset();
quick_resolution_trampoline_offset_ =
@@ -424,8 +426,6 @@ ObjectArray<Object>* ImageWriter::CreateImageRoots() const {
runtime->GetCalleeSaveMethod(Runtime::kRefsOnly));
image_roots->Set<false>(ImageHeader::kRefsAndArgsSaveMethod,
runtime->GetCalleeSaveMethod(Runtime::kRefsAndArgs));
- image_roots->Set<false>(ImageHeader::kOatLocation,
- String::AllocFromModifiedUtf8(self, oat_file_->GetLocation().c_str()));
image_roots->Set<false>(ImageHeader::kDexCaches, dex_caches);
image_roots->Set<false>(ImageHeader::kClassRoots, class_linker->GetClassRoots());
for (int i = 0; i < ImageHeader::kImageRootsMax; i++) {
@@ -635,9 +635,18 @@ void ImageWriter::FixupMethod(ArtMethod* orig, ArtMethod* copy) {
// Use original code if it exists. Otherwise, set the code pointer to an appropriate
// trampoline (resolution, generic JNI, or interpreter bridge).
const byte* quick_code = GetOatAddress(orig->GetQuickOatCodeOffset());
- if (quick_code != nullptr) {
+ if (quick_code != nullptr &&
+ (!orig->IsStatic() || orig->IsConstructor() || orig->GetDeclaringClass()->IsInitialized())) {
+ // We have code for a non-static method, a constructor, or an initialized class; just use it.
copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(quick_code);
+ } else if (quick_code == nullptr && orig->IsNative() && !orig->IsStatic()) {
+ // Non-static native method missing compiled code, use generic JNI version.
+ copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(GetOatAddress(quick_generic_jni_trampoline_offset_));
+ } else if (quick_code == nullptr && !orig->IsNative()) {
+ // We don't have code at all for a non-native method, use the interpreter.
+ copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(GetOatAddress(quick_to_interpreter_bridge_offset_));
} else {
+ // We have code for a static method, but need to go through the resolution stub for class initialization.
copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(GetOatAddress(quick_resolution_trampoline_offset_));
}
const byte* portable_code = GetOatAddress(orig->GetPortableOatCodeOffset());
@@ -807,6 +816,12 @@ void ImageWriter::PatchOatCodeAndMethods() {
uintptr_t value = quick_code - patch_location + patch->RelativeOffset();
SetPatchLocation(patch, value);
} else {
+ // For native methods, use the generic JNI trampoline instead of the
+ // interpreter bridge that GetQuickOatCodeFor() falls back to.
+ if (target->IsNative() &&
+ quick_code == reinterpret_cast<uintptr_t>(GetQuickToInterpreterBridge())) {
+ code_offset = quick_generic_jni_trampoline_offset_;
+ }
+
SetPatchLocation(patch, PointerToLowMemUInt32(GetOatAddress(code_offset)));
}
}
@@ -845,7 +860,7 @@ void ImageWriter::SetPatchLocation(const CompilerDriver::PatchInformation* patch
if (patch->IsCall()) {
const CompilerDriver::CallPatchInformation* cpatch = patch->AsCall();
const DexFile::MethodId& id = cpatch->GetDexFile().GetMethodId(cpatch->GetTargetMethodIdx());
- uintptr_t expected = reinterpret_cast<uintptr_t>(&id);
+ uint32_t expected = reinterpret_cast<uintptr_t>(&id) & 0xFFFFFFFF;
uint32_t actual = *patch_location;
CHECK(actual == expected || actual == value) << std::hex
<< "actual=" << actual
@@ -855,7 +870,7 @@ void ImageWriter::SetPatchLocation(const CompilerDriver::PatchInformation* patch
if (patch->IsType()) {
const CompilerDriver::TypePatchInformation* tpatch = patch->AsType();
const DexFile::TypeId& id = tpatch->GetDexFile().GetTypeId(tpatch->GetTargetTypeIdx());
- uintptr_t expected = reinterpret_cast<uintptr_t>(&id);
+ uint32_t expected = reinterpret_cast<uintptr_t>(&id) & 0xFFFFFFFF;
uint32_t actual = *patch_location;
CHECK(actual == expected || actual == value) << std::hex
<< "actual=" << actual
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index a1504eeca8..dff33bad1e 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -41,8 +41,8 @@ class ImageWriter {
: compiler_driver_(compiler_driver), oat_file_(NULL), image_end_(0), image_begin_(NULL),
oat_data_begin_(NULL), interpreter_to_interpreter_bridge_offset_(0),
interpreter_to_compiled_code_bridge_offset_(0), portable_imt_conflict_trampoline_offset_(0),
- portable_resolution_trampoline_offset_(0), quick_imt_conflict_trampoline_offset_(0),
- quick_resolution_trampoline_offset_(0) {}
+ portable_resolution_trampoline_offset_(0), quick_generic_jni_trampoline_offset_(0),
+ quick_imt_conflict_trampoline_offset_(0), quick_resolution_trampoline_offset_(0) {}
~ImageWriter() {}
@@ -195,6 +195,7 @@ class ImageWriter {
uint32_t portable_imt_conflict_trampoline_offset_;
uint32_t portable_resolution_trampoline_offset_;
uint32_t portable_to_interpreter_bridge_offset_;
+ uint32_t quick_generic_jni_trampoline_offset_;
uint32_t quick_imt_conflict_trampoline_offset_;
uint32_t quick_resolution_trampoline_offset_;
uint32_t quick_to_interpreter_bridge_offset_;
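With the new quick_generic_jni_trampoline_offset_ field in place, ImageWriter::FixupMethod (above) chooses among four quick entry points. The same decision, restated as a standalone sketch:

    // Sketch of the four-way choice in ImageWriter::FixupMethod.
    enum class QuickEntry { kCode, kGenericJniTrampoline, kInterpreterBridge, kResolutionTrampoline };

    QuickEntry ChooseQuickEntry(bool has_code, bool is_static, bool is_constructor,
                                bool class_initialized, bool is_native) {
      if (has_code && (!is_static || is_constructor || class_initialized)) {
        return QuickEntry::kCode;                  // Usable code, no clinit hazard.
      }
      if (!has_code && is_native && !is_static) {
        return QuickEntry::kGenericJniTrampoline;  // Uncompiled non-static native method.
      }
      if (!has_code && !is_native) {
        return QuickEntry::kInterpreterBridge;     // No code at all: interpret.
      }
      return QuickEntry::kResolutionTrampoline;    // Static method needing class init.
    }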
diff --git a/compiler/llvm/compiler_llvm.cc b/compiler/llvm/compiler_llvm.cc
index a5acd2a332..4ce714a183 100644
--- a/compiler/llvm/compiler_llvm.cc
+++ b/compiler/llvm/compiler_llvm.cc
@@ -175,16 +175,16 @@ CompileNativeMethod(DexCompilationUnit* dex_compilation_unit) {
} // namespace llvm
} // namespace art
-inline static art::llvm::CompilerLLVM* ContextOf(art::CompilerDriver& driver) {
+static art::llvm::CompilerLLVM* ContextOf(art::CompilerDriver& driver) {
void *compiler_context = driver.GetCompilerContext();
CHECK(compiler_context != NULL);
return reinterpret_cast<art::llvm::CompilerLLVM*>(compiler_context);
}
-inline static const art::llvm::CompilerLLVM* ContextOf(const art::CompilerDriver& driver) {
+static art::llvm::CompilerLLVM* ContextOf(const art::CompilerDriver& driver) {
void *compiler_context = driver.GetCompilerContext();
CHECK(compiler_context != NULL);
- return reinterpret_cast<const art::llvm::CompilerLLVM*>(compiler_context);
+ return reinterpret_cast<art::llvm::CompilerLLVM*>(compiler_context);
}
extern "C" void ArtInitCompilerContext(art::CompilerDriver& driver) {
@@ -233,7 +233,7 @@ extern "C" art::CompiledMethod* ArtLLVMJniCompileMethod(art::CompilerDriver& dri
return result;
}
-extern "C" void compilerLLVMSetBitcodeFileName(art::CompilerDriver& driver,
- std::string const& filename) {
+extern "C" void compilerLLVMSetBitcodeFileName(const art::CompilerDriver& driver,
+ const std::string& filename) {
ContextOf(driver)->SetBitcodeFileName(filename);
}
diff --git a/compiler/llvm/compiler_llvm.h b/compiler/llvm/compiler_llvm.h
index 65bc16bcd8..c2211fb92c 100644
--- a/compiler/llvm/compiler_llvm.h
+++ b/compiler/llvm/compiler_llvm.h
@@ -70,7 +70,7 @@ class CompilerLLVM {
return insn_set_;
}
- void SetBitcodeFileName(std::string const& filename) {
+ void SetBitcodeFileName(const std::string& filename) {
bitcode_filename_ = filename;
}
diff --git a/compiler/llvm/gbc_expander.cc b/compiler/llvm/gbc_expander.cc
index 8f22a97968..cf28db3bfc 100644
--- a/compiler/llvm/gbc_expander.cc
+++ b/compiler/llvm/gbc_expander.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "dex_file.h"
#include "dex_file-inl.h"
#include "driver/compiler_driver.h"
#include "driver/dex_compilation_unit.h"
@@ -1602,7 +1603,7 @@ llvm::Value* GBCExpanderPass::Expand_HLIGet(llvm::CallInst& call_inst,
llvm::Value* field_value;
- int field_offset;
+ art::MemberOffset field_offset(0u);
bool is_volatile;
bool is_fast_path = driver_->ComputeInstanceFieldInfo(
field_idx, dex_compilation_unit_, false, &field_offset, &is_volatile);
@@ -1633,12 +1634,12 @@ llvm::Value* GBCExpanderPass::Expand_HLIGet(llvm::CallInst& call_inst,
field_value = irb_.CreateBitCast(field_value, irb_.getJType(field_jty));
}
} else {
- DCHECK_GE(field_offset, 0);
+ DCHECK_GE(field_offset.Int32Value(), 0);
llvm::PointerType* field_type =
irb_.getJType(field_jty)->getPointerTo();
- llvm::ConstantInt* field_offset_value = irb_.getPtrEquivInt(field_offset);
+ llvm::ConstantInt* field_offset_value = irb_.getPtrEquivInt(field_offset.Int32Value());
llvm::Value* field_addr =
irb_.CreatePtrDisp(object_addr, field_offset_value, field_type);
@@ -1664,7 +1665,7 @@ void GBCExpanderPass::Expand_HLIPut(llvm::CallInst& call_inst,
EmitGuard_NullPointerException(dex_pc, object_addr, opt_flags);
- int field_offset;
+ art::MemberOffset field_offset(0u);
bool is_volatile;
bool is_fast_path = driver_->ComputeInstanceFieldInfo(
field_idx, dex_compilation_unit_, true, &field_offset, &is_volatile);
@@ -1698,7 +1699,7 @@ void GBCExpanderPass::Expand_HLIPut(llvm::CallInst& call_inst,
EmitGuard_ExceptionLandingPad(dex_pc);
} else {
- DCHECK_GE(field_offset, 0);
+ DCHECK_GE(field_offset.Int32Value(), 0);
if (is_volatile) {
irb_.CreateMemoryBarrier(art::kStoreStore);
@@ -1707,7 +1708,7 @@ void GBCExpanderPass::Expand_HLIPut(llvm::CallInst& call_inst,
llvm::PointerType* field_type =
irb_.getJType(field_jty)->getPointerTo();
- llvm::Value* field_offset_value = irb_.getPtrEquivInt(field_offset);
+ llvm::Value* field_offset_value = irb_.getPtrEquivInt(field_offset.Int32Value());
llvm::Value* field_addr =
irb_.CreatePtrDisp(object_addr, field_offset_value, field_type);
@@ -1875,8 +1876,8 @@ llvm::Value* GBCExpanderPass::Expand_HLSget(llvm::CallInst& call_inst,
uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
uint32_t field_idx = LV2UInt(call_inst.getArgOperand(0));
- int field_offset;
- int ssb_index;
+ art::MemberOffset field_offset(0u);
+ uint32_t ssb_index;
bool is_referrers_class;
bool is_volatile;
bool is_initialized;
@@ -1913,7 +1914,7 @@ llvm::Value* GBCExpanderPass::Expand_HLSget(llvm::CallInst& call_inst,
static_field_value = irb_.CreateBitCast(static_field_value, irb_.getJType(field_jty));
}
} else {
- DCHECK_GE(field_offset, 0);
+ DCHECK_GE(field_offset.Int32Value(), 0);
llvm::Value* static_storage_addr = NULL;
@@ -1929,11 +1930,11 @@ llvm::Value* GBCExpanderPass::Expand_HLSget(llvm::CallInst& call_inst,
} else {
// Medium path, static storage base in a different class which
// requires checks that the other class is initialized
- DCHECK_GE(ssb_index, 0);
+ DCHECK_NE(ssb_index, art::DexFile::kDexNoIndex);
static_storage_addr = EmitLoadStaticStorage(dex_pc, ssb_index);
}
- llvm::Value* static_field_offset_value = irb_.getPtrEquivInt(field_offset);
+ llvm::Value* static_field_offset_value = irb_.getPtrEquivInt(field_offset.Int32Value());
llvm::Value* static_field_addr =
irb_.CreatePtrDisp(static_storage_addr, static_field_offset_value,
@@ -1960,8 +1961,8 @@ void GBCExpanderPass::Expand_HLSput(llvm::CallInst& call_inst,
new_value = irb_.CreateBitCast(new_value, irb_.getJType(field_jty));
}
- int field_offset;
- int ssb_index;
+ art::MemberOffset field_offset(0u);
+ uint32_t ssb_index;
bool is_referrers_class;
bool is_volatile;
bool is_initialized;
@@ -1999,7 +2000,7 @@ void GBCExpanderPass::Expand_HLSput(llvm::CallInst& call_inst,
EmitGuard_ExceptionLandingPad(dex_pc);
} else {
- DCHECK_GE(field_offset, 0);
+ DCHECK_GE(field_offset.Int32Value(), 0);
llvm::Value* static_storage_addr = NULL;
@@ -2015,7 +2016,7 @@ void GBCExpanderPass::Expand_HLSput(llvm::CallInst& call_inst,
} else {
// Medium path, static storage base in a different class which
// requires checks that the other class is initialized
- DCHECK_GE(ssb_index, 0);
+ DCHECK_NE(ssb_index, art::DexFile::kDexNoIndex);
static_storage_addr = EmitLoadStaticStorage(dex_pc, ssb_index);
}
@@ -2023,7 +2024,7 @@ void GBCExpanderPass::Expand_HLSput(llvm::CallInst& call_inst,
irb_.CreateMemoryBarrier(art::kStoreStore);
}
- llvm::Value* static_field_offset_value = irb_.getPtrEquivInt(field_offset);
+ llvm::Value* static_field_offset_value = irb_.getPtrEquivInt(field_offset.Int32Value());
llvm::Value* static_field_addr =
irb_.CreatePtrDisp(static_storage_addr, static_field_offset_value,
diff --git a/compiler/llvm/llvm_compilation_unit.cc b/compiler/llvm/llvm_compilation_unit.cc
index d23706d9f4..1d027f9d3b 100644
--- a/compiler/llvm/llvm_compilation_unit.cc
+++ b/compiler/llvm/llvm_compilation_unit.cc
@@ -199,7 +199,8 @@ bool LlvmCompilationUnit::MaterializeToRawOStream(::llvm::raw_ostream& out_strea
std::string target_triple;
std::string target_cpu;
std::string target_attr;
- CompilerDriver::InstructionSetToLLVMTarget(GetInstructionSet(), target_triple, target_cpu, target_attr);
+ CompilerDriver::InstructionSetToLLVMTarget(GetInstructionSet(), &target_triple, &target_cpu,
+ &target_attr);
std::string errmsg;
const ::llvm::Target* target =
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 55a962f7fd..93c35022f2 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -105,7 +105,7 @@ TEST_F(OatTest, WriteRead) {
jobject class_loader = NULL;
if (kCompile) {
TimingLogger timings("OatTest::WriteRead", false, false);
- compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), timings);
+ compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), &timings);
}
ScopedObjectAccess soa(Thread::Current());
@@ -119,12 +119,12 @@ TEST_F(OatTest, WriteRead) {
bool success = compiler_driver_->WriteElf(GetTestAndroidRoot(),
!kIsTargetBuild,
class_linker->GetBootClassPath(),
- oat_writer,
+ &oat_writer,
tmp.GetFile());
ASSERT_TRUE(success);
if (kCompile) { // OatWriter strips the code, regenerate to compare
- compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), timings);
+ compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), &timings);
}
std::string error_msg;
UniquePtr<OatFile> oat_file(OatFile::Open(tmp.GetFilename(), tmp.GetFilename(), NULL, false,
@@ -175,7 +175,7 @@ TEST_F(OatTest, WriteRead) {
TEST_F(OatTest, OatHeaderSizeCheck) {
// If this test is failing and you have to update these constants,
// it is time to update OatHeader::kOatVersion
- EXPECT_EQ(76U, sizeof(OatHeader));
+ EXPECT_EQ(80U, sizeof(OatHeader));
EXPECT_EQ(28U, sizeof(OatMethodOffsets));
}
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 7c5669a3ab..186ab38f86 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -60,6 +60,7 @@ OatWriter::OatWriter(const std::vector<const DexFile*>& dex_files,
size_portable_imt_conflict_trampoline_(0),
size_portable_resolution_trampoline_(0),
size_portable_to_interpreter_bridge_(0),
+ size_quick_generic_jni_trampoline_(0),
size_quick_imt_conflict_trampoline_(0),
size_quick_resolution_trampoline_(0),
size_quick_to_interpreter_bridge_(0),
@@ -229,7 +230,7 @@ size_t OatWriter::InitOatClasses(size_t offset) {
oat_classes_.push_back(oat_class);
offset += oat_class->SizeOf();
}
- oat_dex_files_[i]->UpdateChecksum(*oat_header_);
+ oat_dex_files_[i]->UpdateChecksum(oat_header_);
}
return offset;
}
@@ -256,6 +257,7 @@ size_t OatWriter::InitOatCode(size_t offset) {
DO_TRAMPOLINE(portable_imt_conflict_trampoline_, PortableImtConflictTrampoline);
DO_TRAMPOLINE(portable_resolution_trampoline_, PortableResolutionTrampoline);
DO_TRAMPOLINE(portable_to_interpreter_bridge_, PortableToInterpreterBridge);
+ DO_TRAMPOLINE(quick_generic_jni_trampoline_, QuickGenericJniTrampoline);
DO_TRAMPOLINE(quick_imt_conflict_trampoline_, QuickImtConflictTrampoline);
DO_TRAMPOLINE(quick_resolution_trampoline_, QuickResolutionTrampoline);
DO_TRAMPOLINE(quick_to_interpreter_bridge_, QuickToInterpreterBridge);
@@ -268,6 +270,7 @@ size_t OatWriter::InitOatCode(size_t offset) {
oat_header_->SetPortableImtConflictTrampolineOffset(0);
oat_header_->SetPortableResolutionTrampolineOffset(0);
oat_header_->SetPortableToInterpreterBridgeOffset(0);
+ oat_header_->SetQuickGenericJniTrampolineOffset(0);
oat_header_->SetQuickImtConflictTrampolineOffset(0);
oat_header_->SetQuickResolutionTrampolineOffset(0);
oat_header_->SetQuickToInterpreterBridgeOffset(0);
@@ -293,7 +296,7 @@ size_t OatWriter::InitOatCodeDexFile(size_t offset,
class_def_index++, (*oat_class_index)++) {
const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
offset = InitOatCodeClassDef(offset, *oat_class_index, class_def_index, dex_file, class_def);
- oat_classes_[*oat_class_index]->UpdateChecksum(*oat_header_);
+ oat_classes_[*oat_class_index]->UpdateChecksum(oat_header_);
}
return offset;
}
@@ -378,6 +381,27 @@ size_t OatWriter::InitOatCodeMethod(size_t offset, size_t oat_class_index,
uint32_t thumb_offset = compiled_method->CodeDelta();
quick_code_offset = offset + sizeof(code_size) + thumb_offset;
+ std::vector<uint8_t>* cfi_info = compiler_driver_->GetCallFrameInformation();
+ if (cfi_info != nullptr) {
+ // Copy in the FDE, if present.
+ const std::vector<uint8_t>* fde = compiled_method->GetCFIInfo();
+ if (fde != nullptr) {
+ // Copy the information into cfi_info and then fix the address in the new copy.
+ int cur_offset = cfi_info->size();
+ cfi_info->insert(cfi_info->end(), fde->begin(), fde->end());
+
+ // Set the 'initial_location' field to address the start of the method.
+ uint32_t new_value = quick_code_offset - oat_header_->GetExecutableOffset();
+ uint32_t offset_to_update = cur_offset + 2*sizeof(uint32_t);
+ (*cfi_info)[offset_to_update+0] = new_value;
+ (*cfi_info)[offset_to_update+1] = new_value >> 8;
+ (*cfi_info)[offset_to_update+2] = new_value >> 16;
+ (*cfi_info)[offset_to_update+3] = new_value >> 24;
+ method_info_.push_back(DebugInfo(PrettyMethod(class_def_method_index, dex_file, false),
+ new_value, new_value + code_size));
+ }
+ }
+
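The in-place patch above relies on the DWARF32 .debug_frame FDE layout: a 4-byte length, a 4-byte CIE pointer, then the 4-byte initial_location, which is why the write lands at cur_offset + 2 * sizeof(uint32_t). The same patch as a little helper:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Patch a DWARF32 .debug_frame FDE's initial_location in place.
    // Layout: [length:4][CIE_pointer:4][initial_location:4][address_range:4]...
    void SetFdeInitialLocation(std::vector<uint8_t>* cfi, size_t fde_offset, uint32_t pc) {
      size_t loc = fde_offset + 2 * sizeof(uint32_t);
      (*cfi)[loc + 0] = pc & 0xff;
      (*cfi)[loc + 1] = (pc >> 8) & 0xff;
      (*cfi)[loc + 2] = (pc >> 16) & 0xff;
      (*cfi)[loc + 3] = (pc >> 24) & 0xff;
    }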
// Deduplicate code arrays
SafeMap<const std::vector<uint8_t>*, uint32_t>::iterator code_iter =
code_offsets_.find(quick_code);
@@ -482,15 +506,8 @@ size_t OatWriter::InitOatCodeMethod(size_t offset, size_t oat_class_index,
method->SetCoreSpillMask(core_spill_mask);
method->SetFpSpillMask(fp_spill_mask);
method->SetOatMappingTableOffset(mapping_table_offset);
- // Don't overwrite static method trampoline
- if (!method->IsStatic() || method->IsConstructor() ||
- method->GetDeclaringClass()->IsInitialized()) {
- // TODO: record portable code offsets: method->SetPortableOatCodeOffset(portable_code_offset);
- method->SetQuickOatCodeOffset(quick_code_offset);
- } else {
- method->SetEntryPointFromPortableCompiledCode(nullptr);
- method->SetEntryPointFromQuickCompiledCode(nullptr);
- }
+ // Portable code offsets are set by ElfWriterMclinker::FixupCompiledCodeOffset after linking.
+ method->SetQuickOatCodeOffset(quick_code_offset);
method->SetOatVmapTableOffset(vmap_table_offset);
method->SetOatNativeGcMapOffset(gc_map_offset);
}
@@ -499,42 +516,42 @@ size_t OatWriter::InitOatCodeMethod(size_t offset, size_t oat_class_index,
}
#define DCHECK_OFFSET() \
- DCHECK_EQ(static_cast<off_t>(file_offset + relative_offset), out.Seek(0, kSeekCurrent)) \
+ DCHECK_EQ(static_cast<off_t>(file_offset + relative_offset), out->Seek(0, kSeekCurrent)) \
<< "file_offset=" << file_offset << " relative_offset=" << relative_offset
#define DCHECK_OFFSET_() \
- DCHECK_EQ(static_cast<off_t>(file_offset + offset_), out.Seek(0, kSeekCurrent)) \
+ DCHECK_EQ(static_cast<off_t>(file_offset + offset_), out->Seek(0, kSeekCurrent)) \
<< "file_offset=" << file_offset << " offset_=" << offset_
-bool OatWriter::Write(OutputStream& out) {
- const size_t file_offset = out.Seek(0, kSeekCurrent);
+bool OatWriter::Write(OutputStream* out) {
+ const size_t file_offset = out->Seek(0, kSeekCurrent);
- if (!out.WriteFully(oat_header_, sizeof(*oat_header_))) {
- PLOG(ERROR) << "Failed to write oat header to " << out.GetLocation();
+ if (!out->WriteFully(oat_header_, sizeof(*oat_header_))) {
+ PLOG(ERROR) << "Failed to write oat header to " << out->GetLocation();
return false;
}
size_oat_header_ += sizeof(*oat_header_);
- if (!out.WriteFully(image_file_location_.data(), image_file_location_.size())) {
- PLOG(ERROR) << "Failed to write oat header image file location to " << out.GetLocation();
+ if (!out->WriteFully(image_file_location_.data(), image_file_location_.size())) {
+ PLOG(ERROR) << "Failed to write oat header image file location to " << out->GetLocation();
return false;
}
size_oat_header_image_file_location_ += image_file_location_.size();
if (!WriteTables(out, file_offset)) {
- LOG(ERROR) << "Failed to write oat tables to " << out.GetLocation();
+ LOG(ERROR) << "Failed to write oat tables to " << out->GetLocation();
return false;
}
size_t relative_offset = WriteCode(out, file_offset);
if (relative_offset == 0) {
- LOG(ERROR) << "Failed to write oat code to " << out.GetLocation();
+ LOG(ERROR) << "Failed to write oat code to " << out->GetLocation();
return false;
}
relative_offset = WriteCodeDexFiles(out, file_offset, relative_offset);
if (relative_offset == 0) {
- LOG(ERROR) << "Failed to write oat code for dex files to " << out.GetLocation();
+ LOG(ERROR) << "Failed to write oat code for dex files to " << out->GetLocation();
return false;
}
@@ -555,6 +572,7 @@ bool OatWriter::Write(OutputStream& out) {
DO_STAT(size_portable_imt_conflict_trampoline_);
DO_STAT(size_portable_resolution_trampoline_);
DO_STAT(size_portable_to_interpreter_bridge_);
+ DO_STAT(size_quick_generic_jni_trampoline_);
DO_STAT(size_quick_imt_conflict_trampoline_);
DO_STAT(size_quick_resolution_trampoline_);
DO_STAT(size_quick_to_interpreter_bridge_);
@@ -577,26 +595,26 @@ bool OatWriter::Write(OutputStream& out) {
#undef DO_STAT
VLOG(compiler) << "size_total=" << PrettySize(size_total) << " (" << size_total << "B)"; \
- CHECK_EQ(file_offset + size_total, static_cast<uint32_t>(out.Seek(0, kSeekCurrent)));
+ CHECK_EQ(file_offset + size_total, static_cast<uint32_t>(out->Seek(0, kSeekCurrent)));
CHECK_EQ(size_, size_total);
}
- CHECK_EQ(file_offset + size_, static_cast<uint32_t>(out.Seek(0, kSeekCurrent)));
+ CHECK_EQ(file_offset + size_, static_cast<uint32_t>(out->Seek(0, kSeekCurrent)));
CHECK_EQ(size_, relative_offset);
return true;
}
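DO_STAT's definition is not part of this hunk; assuming it has roughly the usual X-macro shape, #x stringizes each size_* field so it is logged by name and folded into size_total, which the CHECK_EQ lines above then reconcile against the stream cursor:
// Assumed shape of DO_STAT; VLOG, PrettySize, and size_total come from the
// surrounding function, and #x stringizes the counter's name.
#define DO_STAT(x) \
  VLOG(compiler) << #x "=" << PrettySize(x) << " (" << (x) << "B)"; \
  size_total += (x);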
-bool OatWriter::WriteTables(OutputStream& out, const size_t file_offset) {
+bool OatWriter::WriteTables(OutputStream* out, const size_t file_offset) {
for (size_t i = 0; i != oat_dex_files_.size(); ++i) {
if (!oat_dex_files_[i]->Write(this, out, file_offset)) {
- PLOG(ERROR) << "Failed to write oat dex information to " << out.GetLocation();
+ PLOG(ERROR) << "Failed to write oat dex information to " << out->GetLocation();
return false;
}
}
for (size_t i = 0; i != oat_dex_files_.size(); ++i) {
uint32_t expected_offset = file_offset + oat_dex_files_[i]->dex_file_offset_;
- off_t actual_offset = out.Seek(expected_offset, kSeekSet);
+ off_t actual_offset = out->Seek(expected_offset, kSeekSet);
if (static_cast<uint32_t>(actual_offset) != expected_offset) {
const DexFile* dex_file = (*dex_files_)[i];
PLOG(ERROR) << "Failed to seek to dex file section. Actual: " << actual_offset
@@ -604,29 +622,29 @@ bool OatWriter::WriteTables(OutputStream& out, const size_t file_offset) {
return false;
}
const DexFile* dex_file = (*dex_files_)[i];
- if (!out.WriteFully(&dex_file->GetHeader(), dex_file->GetHeader().file_size_)) {
+ if (!out->WriteFully(&dex_file->GetHeader(), dex_file->GetHeader().file_size_)) {
PLOG(ERROR) << "Failed to write dex file " << dex_file->GetLocation()
- << " to " << out.GetLocation();
+ << " to " << out->GetLocation();
return false;
}
size_dex_file_ += dex_file->GetHeader().file_size_;
}
for (size_t i = 0; i != oat_classes_.size(); ++i) {
if (!oat_classes_[i]->Write(this, out, file_offset)) {
- PLOG(ERROR) << "Failed to write oat methods information to " << out.GetLocation();
+ PLOG(ERROR) << "Failed to write oat methods information to " << out->GetLocation();
return false;
}
}
return true;
}
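WriteTables seeks to each dex file's expected absolute position and verifies the landing offset before writing, so a stale header offset fails loudly instead of silently corrupting the file. The same seek-then-verify step, sketched against stdio rather than art::OutputStream:
// stdio stand-in for the seek-then-verify step; art::OutputStream differs.
#include <cstdint>
#include <cstdio>
bool SeekToDexFile(std::FILE* out, uint32_t file_offset, uint32_t dex_file_offset) {
  uint32_t expected = file_offset + dex_file_offset;
  if (std::fseek(out, static_cast<long>(expected), SEEK_SET) != 0 ||
      static_cast<uint32_t>(std::ftell(out)) != expected) {
    std::fprintf(stderr, "Failed to seek to dex file section at %u\n", expected);
    return false;  // fail before writing anything at the wrong position
  }
  return true;
}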
-size_t OatWriter::WriteCode(OutputStream& out, const size_t file_offset) {
+size_t OatWriter::WriteCode(OutputStream* out, const size_t file_offset) {
size_t relative_offset = oat_header_->GetExecutableOffset();
- off_t new_offset = out.Seek(size_executable_offset_alignment_, kSeekCurrent);
+ off_t new_offset = out->Seek(size_executable_offset_alignment_, kSeekCurrent);
size_t expected_file_offset = file_offset + relative_offset;
if (static_cast<uint32_t>(new_offset) != expected_file_offset) {
PLOG(ERROR) << "Failed to seek to oat code section. Actual: " << new_offset
- << " Expected: " << expected_file_offset << " File: " << out.GetLocation();
+ << " Expected: " << expected_file_offset << " File: " << out->GetLocation();
return 0;
}
DCHECK_OFFSET();
@@ -637,10 +655,10 @@ size_t OatWriter::WriteCode(OutputStream& out, const size_t file_offset) {
do { \
uint32_t aligned_offset = CompiledCode::AlignCode(relative_offset, instruction_set); \
uint32_t alignment_padding = aligned_offset - relative_offset; \
- out.Seek(alignment_padding, kSeekCurrent); \
+ out->Seek(alignment_padding, kSeekCurrent); \
size_trampoline_alignment_ += alignment_padding; \
- if (!out.WriteFully(&(*field)[0], field->size())) { \
- PLOG(ERROR) << "Failed to write " # field " to " << out.GetLocation(); \
+ if (!out->WriteFully(&(*field)[0], field->size())) { \
+ PLOG(ERROR) << "Failed to write " # field " to " << out->GetLocation(); \
return false; \
} \
size_ ## field += field->size(); \
@@ -654,6 +672,7 @@ size_t OatWriter::WriteCode(OutputStream& out, const size_t file_offset) {
DO_TRAMPOLINE(portable_imt_conflict_trampoline_);
DO_TRAMPOLINE(portable_resolution_trampoline_);
DO_TRAMPOLINE(portable_to_interpreter_bridge_);
+ DO_TRAMPOLINE(quick_generic_jni_trampoline_);
DO_TRAMPOLINE(quick_imt_conflict_trampoline_);
DO_TRAMPOLINE(quick_resolution_trampoline_);
DO_TRAMPOLINE(quick_to_interpreter_bridge_);
@@ -662,7 +681,7 @@ size_t OatWriter::WriteCode(OutputStream& out, const size_t file_offset) {
return relative_offset;
}
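DO_TRAMPOLINE pads to the instruction-set alignment, writes the trampoline bytes, and accounts both the padding and the payload. The same align/pad/write sequence as a plain function, with AlignUp standing in for CompiledCode::AlignCode (power-of-two alignment assumed):
// Align/pad/write into a byte buffer; alignment must be a power of two.
#include <cstdint>
#include <vector>
uint32_t AlignUp(uint32_t offset, uint32_t alignment) {
  return (offset + alignment - 1) & ~(alignment - 1);
}
// Returns the new relative offset after padding plus the trampoline body.
uint32_t WriteTrampoline(std::vector<uint8_t>* out, uint32_t relative_offset,
                         const std::vector<uint8_t>& code, uint32_t alignment) {
  uint32_t aligned = AlignUp(relative_offset, alignment);
  out->insert(out->end(), aligned - relative_offset, 0);   // alignment padding
  out->insert(out->end(), code.begin(), code.end());       // trampoline bytes
  return aligned + static_cast<uint32_t>(code.size());
}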
-size_t OatWriter::WriteCodeDexFiles(OutputStream& out,
+size_t OatWriter::WriteCodeDexFiles(OutputStream* out,
const size_t file_offset,
size_t relative_offset) {
size_t oat_class_index = 0;
@@ -678,7 +697,7 @@ size_t OatWriter::WriteCodeDexFiles(OutputStream& out,
return relative_offset;
}
-size_t OatWriter::WriteCodeDexFile(OutputStream& out, const size_t file_offset,
+size_t OatWriter::WriteCodeDexFile(OutputStream* out, const size_t file_offset,
size_t relative_offset, size_t* oat_class_index,
const DexFile& dex_file) {
for (size_t class_def_index = 0; class_def_index < dex_file.NumClassDefs();
@@ -694,12 +713,12 @@ size_t OatWriter::WriteCodeDexFile(OutputStream& out, const size_t file_offset,
}
void OatWriter::ReportWriteFailure(const char* what, uint32_t method_idx,
- const DexFile& dex_file, OutputStream& out) const {
+ const DexFile& dex_file, const OutputStream& out) const {
PLOG(ERROR) << "Failed to write " << what << " for " << PrettyMethod(method_idx, dex_file)
<< " to " << out.GetLocation();
}
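ReportWriteFailure now takes the stream as const because it only reads GetLocation() for the message; the writing paths keep the mutable OutputStream* convention. A sketch of the read-only helper, with fprintf standing in for PLOG:
// Read-only use justifies const: only the location string is consumed.
#include <cstdio>
#include <string>
struct StreamSketch {
  std::string location_;
  const std::string& GetLocation() const { return location_; }
};
void ReportWriteFailureSketch(const char* what, const StreamSketch& out) {
  std::fprintf(stderr, "Failed to write %s to %s\n", what, out.GetLocation().c_str());
}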
-size_t OatWriter::WriteCodeClassDef(OutputStream& out,
+size_t OatWriter::WriteCodeClassDef(OutputStream* out,
const size_t file_offset,
size_t relative_offset,
size_t oat_class_index,
@@ -747,7 +766,7 @@ size_t OatWriter::WriteCodeClassDef(OutputStream& out,
return relative_offset;
}
-size_t OatWriter::WriteCodeMethod(OutputStream& out, const size_t file_offset,
+size_t OatWriter::WriteCodeMethod(OutputStream* out, const size_t file_offset,
size_t relative_offset, size_t oat_class_index,
size_t class_def_method_index, size_t* method_offsets_index,
bool is_static, uint32_t method_idx, const DexFile& dex_file) {
@@ -763,12 +782,12 @@ size_t OatWriter::WriteCodeMethod(OutputStream& out, const size_t file_offset,
uint32_t aligned_offset = compiled_method->AlignCode(relative_offset);
uint32_t aligned_code_delta = aligned_offset - relative_offset;
if (aligned_code_delta != 0) {
- off_t new_offset = out.Seek(aligned_code_delta, kSeekCurrent);
+ off_t new_offset = out->Seek(aligned_code_delta, kSeekCurrent);
size_code_alignment_ += aligned_code_delta;
uint32_t expected_offset = file_offset + aligned_offset;
if (static_cast<uint32_t>(new_offset) != expected_offset) {
PLOG(ERROR) << "Failed to seek to align oat code. Actual: " << new_offset
- << " Expected: " << expected_offset << " File: " << out.GetLocation();
+ << " Expected: " << expected_offset << " File: " << out->GetLocation();
return 0;
}
relative_offset += aligned_code_delta;
@@ -787,15 +806,15 @@ size_t OatWriter::WriteCodeMethod(OutputStream& out, const size_t file_offset,
<< PrettyMethod(method_idx, dex_file);
} else {
DCHECK(code_offset == method_offsets.code_offset_) << PrettyMethod(method_idx, dex_file);
- if (!out.WriteFully(&code_size, sizeof(code_size))) {
- ReportWriteFailure("method code size", method_idx, dex_file, out);
+ if (!out->WriteFully(&code_size, sizeof(code_size))) {
+ ReportWriteFailure("method code size", method_idx, dex_file, *out);
return 0;
}
size_code_size_ += sizeof(code_size);
relative_offset += sizeof(code_size);
DCHECK_OFFSET();
- if (!out.WriteFully(&(*quick_code)[0], code_size)) {
- ReportWriteFailure("method code", method_idx, dex_file, out);
+ if (!out->WriteFully(&(*quick_code)[0], code_size)) {
+ ReportWriteFailure("method code", method_idx, dex_file, *out);
return 0;
}
size_code_ += code_size;
@@ -818,8 +837,8 @@ size_t OatWriter::WriteCodeMethod(OutputStream& out, const size_t file_offset,
DCHECK((mapping_table_size == 0 && method_offsets.mapping_table_offset_ == 0)
|| relative_offset == method_offsets.mapping_table_offset_)
<< PrettyMethod(method_idx, dex_file);
- if (!out.WriteFully(&mapping_table[0], mapping_table_size)) {
- ReportWriteFailure("mapping table", method_idx, dex_file, out);
+ if (!out->WriteFully(&mapping_table[0], mapping_table_size)) {
+ ReportWriteFailure("mapping table", method_idx, dex_file, *out);
return 0;
}
size_mapping_table_ += mapping_table_size;
@@ -842,8 +861,8 @@ size_t OatWriter::WriteCodeMethod(OutputStream& out, const size_t file_offset,
DCHECK((vmap_table_size == 0 && method_offsets.vmap_table_offset_ == 0)
|| relative_offset == method_offsets.vmap_table_offset_)
<< PrettyMethod(method_idx, dex_file);
- if (!out.WriteFully(&vmap_table[0], vmap_table_size)) {
- ReportWriteFailure("vmap table", method_idx, dex_file, out);
+ if (!out->WriteFully(&vmap_table[0], vmap_table_size)) {
+ ReportWriteFailure("vmap table", method_idx, dex_file, *out);
return 0;
}
size_vmap_table_ += vmap_table_size;
@@ -866,8 +885,8 @@ size_t OatWriter::WriteCodeMethod(OutputStream& out, const size_t file_offset,
DCHECK((gc_map_size == 0 && method_offsets.gc_map_offset_ == 0)
|| relative_offset == method_offsets.gc_map_offset_)
<< PrettyMethod(method_idx, dex_file);
- if (!out.WriteFully(&gc_map[0], gc_map_size)) {
- ReportWriteFailure("GC map", method_idx, dex_file, out);
+ if (!out->WriteFully(&gc_map[0], gc_map_size)) {
+ ReportWriteFailure("GC map", method_idx, dex_file, *out);
return 0;
}
size_gc_map_ += gc_map_size;
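WriteCodeMethod emits each method as a length-prefixed record, a uint32_t code size followed by the code bytes, advancing relative_offset past both, then appends the mapping, vmap, and GC-map tables with the same write-and-account pattern. The record layout, sketched into a byte buffer:
// Length-prefixed method record: uint32_t size, then the code bytes.
#include <cstdint>
#include <vector>
size_t AppendCodeRecord(std::vector<uint8_t>* out, size_t relative_offset,
                        const std::vector<uint8_t>& quick_code) {
  uint32_t code_size = static_cast<uint32_t>(quick_code.size());
  const uint8_t* p = reinterpret_cast<const uint8_t*>(&code_size);
  out->insert(out->end(), p, p + sizeof(code_size));             // size prefix
  out->insert(out->end(), quick_code.begin(), quick_code.end()); // code body
  return relative_offset + sizeof(code_size) + code_size;        // new offset
}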
@@ -897,42 +916,42 @@ size_t OatWriter::OatDexFile::SizeOf() const {
+ (sizeof(methods_offsets_[0]) * methods_offsets_.size());
}
-void OatWriter::OatDexFile::UpdateChecksum(OatHeader& oat_header) const {
- oat_header.UpdateChecksum(&dex_file_location_size_, sizeof(dex_file_location_size_));
- oat_header.UpdateChecksum(dex_file_location_data_, dex_file_location_size_);
- oat_header.UpdateChecksum(&dex_file_location_checksum_, sizeof(dex_file_location_checksum_));
- oat_header.UpdateChecksum(&dex_file_offset_, sizeof(dex_file_offset_));
- oat_header.UpdateChecksum(&methods_offsets_[0],
+void OatWriter::OatDexFile::UpdateChecksum(OatHeader* oat_header) const {
+ oat_header->UpdateChecksum(&dex_file_location_size_, sizeof(dex_file_location_size_));
+ oat_header->UpdateChecksum(dex_file_location_data_, dex_file_location_size_);
+ oat_header->UpdateChecksum(&dex_file_location_checksum_, sizeof(dex_file_location_checksum_));
+ oat_header->UpdateChecksum(&dex_file_offset_, sizeof(dex_file_offset_));
+ oat_header->UpdateChecksum(&methods_offsets_[0],
sizeof(methods_offsets_[0]) * methods_offsets_.size());
}
bool OatWriter::OatDexFile::Write(OatWriter* oat_writer,
- OutputStream& out,
+ OutputStream* out,
const size_t file_offset) const {
DCHECK_OFFSET_();
- if (!out.WriteFully(&dex_file_location_size_, sizeof(dex_file_location_size_))) {
- PLOG(ERROR) << "Failed to write dex file location length to " << out.GetLocation();
+ if (!out->WriteFully(&dex_file_location_size_, sizeof(dex_file_location_size_))) {
+ PLOG(ERROR) << "Failed to write dex file location length to " << out->GetLocation();
return false;
}
oat_writer->size_oat_dex_file_location_size_ += sizeof(dex_file_location_size_);
- if (!out.WriteFully(dex_file_location_data_, dex_file_location_size_)) {
- PLOG(ERROR) << "Failed to write dex file location data to " << out.GetLocation();
+ if (!out->WriteFully(dex_file_location_data_, dex_file_location_size_)) {
+ PLOG(ERROR) << "Failed to write dex file location data to " << out->GetLocation();
return false;
}
oat_writer->size_oat_dex_file_location_data_ += dex_file_location_size_;
- if (!out.WriteFully(&dex_file_location_checksum_, sizeof(dex_file_location_checksum_))) {
- PLOG(ERROR) << "Failed to write dex file location checksum to " << out.GetLocation();
+ if (!out->WriteFully(&dex_file_location_checksum_, sizeof(dex_file_location_checksum_))) {
+ PLOG(ERROR) << "Failed to write dex file location checksum to " << out->GetLocation();
return false;
}
oat_writer->size_oat_dex_file_location_checksum_ += sizeof(dex_file_location_checksum_);
- if (!out.WriteFully(&dex_file_offset_, sizeof(dex_file_offset_))) {
- PLOG(ERROR) << "Failed to write dex file offset to " << out.GetLocation();
+ if (!out->WriteFully(&dex_file_offset_, sizeof(dex_file_offset_))) {
+ PLOG(ERROR) << "Failed to write dex file offset to " << out->GetLocation();
return false;
}
oat_writer->size_oat_dex_file_offset_ += sizeof(dex_file_offset_);
- if (!out.WriteFully(&methods_offsets_[0],
+ if (!out->WriteFully(&methods_offsets_[0],
sizeof(methods_offsets_[0]) * methods_offsets_.size())) {
- PLOG(ERROR) << "Failed to write methods offsets to " << out.GetLocation();
+ PLOG(ERROR) << "Failed to write methods offsets to " << out->GetLocation();
return false;
}
oat_writer->size_oat_dex_file_methods_offsets_ +=
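OatDexFile::UpdateChecksum and OatDexFile::Write must visit the same fields in the same order; a field added to one but not the other silently breaks checksum verification at load time. A toy accumulator illustrating the Update(data, size) interface (the real OatHeader checksum algorithm is not shown in this hunk):
// Toy accumulator with the same Update(data, size) interface; only the
// "same fields, same order" discipline matters here, not the algorithm.
#include <cstddef>
#include <cstdint>
struct ChecksumSketch {
  uint32_t value_ = 1;
  void Update(const void* data, size_t n) {
    const uint8_t* p = static_cast<const uint8_t*>(data);
    for (size_t i = 0; i < n; ++i) value_ = value_ * 31u + p[i];
  }
};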
@@ -1020,48 +1039,48 @@ size_t OatWriter::OatClass::SizeOf() const {
+ (sizeof(method_offsets_[0]) * method_offsets_.size());
}
-void OatWriter::OatClass::UpdateChecksum(OatHeader& oat_header) const {
- oat_header.UpdateChecksum(&status_, sizeof(status_));
- oat_header.UpdateChecksum(&type_, sizeof(type_));
+void OatWriter::OatClass::UpdateChecksum(OatHeader* oat_header) const {
+ oat_header->UpdateChecksum(&status_, sizeof(status_));
+ oat_header->UpdateChecksum(&type_, sizeof(type_));
if (method_bitmap_size_ != 0) {
CHECK_EQ(kOatClassSomeCompiled, type_);
- oat_header.UpdateChecksum(&method_bitmap_size_, sizeof(method_bitmap_size_));
- oat_header.UpdateChecksum(method_bitmap_->GetRawStorage(), method_bitmap_size_);
+ oat_header->UpdateChecksum(&method_bitmap_size_, sizeof(method_bitmap_size_));
+ oat_header->UpdateChecksum(method_bitmap_->GetRawStorage(), method_bitmap_size_);
}
- oat_header.UpdateChecksum(&method_offsets_[0],
- sizeof(method_offsets_[0]) * method_offsets_.size());
+ oat_header->UpdateChecksum(&method_offsets_[0],
+ sizeof(method_offsets_[0]) * method_offsets_.size());
}
bool OatWriter::OatClass::Write(OatWriter* oat_writer,
- OutputStream& out,
+ OutputStream* out,
const size_t file_offset) const {
DCHECK_OFFSET_();
- if (!out.WriteFully(&status_, sizeof(status_))) {
- PLOG(ERROR) << "Failed to write class status to " << out.GetLocation();
+ if (!out->WriteFully(&status_, sizeof(status_))) {
+ PLOG(ERROR) << "Failed to write class status to " << out->GetLocation();
return false;
}
oat_writer->size_oat_class_status_ += sizeof(status_);
- if (!out.WriteFully(&type_, sizeof(type_))) {
- PLOG(ERROR) << "Failed to write oat class type to " << out.GetLocation();
+ if (!out->WriteFully(&type_, sizeof(type_))) {
+ PLOG(ERROR) << "Failed to write oat class type to " << out->GetLocation();
return false;
}
oat_writer->size_oat_class_type_ += sizeof(type_);
if (method_bitmap_size_ != 0) {
CHECK_EQ(kOatClassSomeCompiled, type_);
- if (!out.WriteFully(&method_bitmap_size_, sizeof(method_bitmap_size_))) {
- PLOG(ERROR) << "Failed to write method bitmap size to " << out.GetLocation();
+ if (!out->WriteFully(&method_bitmap_size_, sizeof(method_bitmap_size_))) {
+ PLOG(ERROR) << "Failed to write method bitmap size to " << out->GetLocation();
return false;
}
oat_writer->size_oat_class_method_bitmaps_ += sizeof(method_bitmap_size_);
- if (!out.WriteFully(method_bitmap_->GetRawStorage(), method_bitmap_size_)) {
- PLOG(ERROR) << "Failed to write method bitmap to " << out.GetLocation();
+ if (!out->WriteFully(method_bitmap_->GetRawStorage(), method_bitmap_size_)) {
+ PLOG(ERROR) << "Failed to write method bitmap to " << out->GetLocation();
return false;
}
oat_writer->size_oat_class_method_bitmaps_ += method_bitmap_size_;
}
- if (!out.WriteFully(&method_offsets_[0],
+ if (!out->WriteFully(&method_offsets_[0],
sizeof(method_offsets_[0]) * method_offsets_.size())) {
- PLOG(ERROR) << "Failed to write method offsets to " << out.GetLocation();
+ PLOG(ERROR) << "Failed to write method offsets to " << out->GetLocation();
return false;
}
oat_writer->size_oat_class_method_offsets_ += sizeof(method_offsets_[0]) * method_offsets_.size();
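OatClass::Write makes the method bitmap conditional: it is serialized (size, then raw bits) only when type_ is kOatClassSomeCompiled, and the CHECK_EQ enforces that pairing. The resulting on-disk shape, sketched as an annotated struct (illustrative only; field widths assumed):
// Illustrative layout, not the serializer; widths are assumed.
#include <cstdint>
struct OatClassLayoutSketch {
  uint16_t status_;  // class status
  uint16_t type_;    // kOatClassAllCompiled / SomeCompiled / NoneCompiled
  // Present only when type_ == kOatClassSomeCompiled:
  //   uint32_t method_bitmap_size_;
  //   uint8_t  method_bitmap_[method_bitmap_size_];
  // Then one OatMethodOffsets entry per compiled method.
};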
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index 067c78971f..bab1a26d44 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -78,10 +78,23 @@ class OatWriter {
return size_;
}
- bool Write(OutputStream& out);
+ bool Write(OutputStream* out);
~OatWriter();
+ struct DebugInfo {
+ DebugInfo(const std::string& method_name, uint32_t low_pc, uint32_t high_pc)
+ : method_name_(method_name), low_pc_(low_pc), high_pc_(high_pc) {
+ }
+ std::string method_name_;
+ uint32_t low_pc_;
+ uint32_t high_pc_;
+ };
+
+ const std::vector<DebugInfo>& GetCFIMethodInfo() const {
+ return method_info_;
+ }
+
private:
size_t InitOatHeader();
size_t InitOatDexFiles(size_t offset);
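The new DebugInfo records give a downstream consumer, presumably the ELF writer emitting CFI, each method's name and [low_pc, high_pc) code range via GetCFIMethodInfo(). A hypothetical consumer loop under that assumption:
// Hypothetical consumer; EmitAllCFI and DebugInfoSketch are illustrative.
#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>
struct DebugInfoSketch {
  std::string method_name_;
  uint32_t low_pc_;
  uint32_t high_pc_;
};
void EmitAllCFI(const std::vector<DebugInfoSketch>& infos) {
  for (const DebugInfoSketch& info : infos) {
    // A real consumer would emit a DWARF entry per [low_pc_, high_pc_) range.
    std::printf("%s: [%u, %u)\n",
                info.method_name_.c_str(), info.low_pc_, info.high_pc_);
  }
}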
@@ -105,28 +118,28 @@ class OatWriter {
bool is_native, InvokeType type, uint32_t method_idx, const DexFile&)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool WriteTables(OutputStream& out, const size_t file_offset);
- size_t WriteCode(OutputStream& out, const size_t file_offset);
- size_t WriteCodeDexFiles(OutputStream& out, const size_t file_offset, size_t relative_offset);
- size_t WriteCodeDexFile(OutputStream& out, const size_t file_offset, size_t relative_offset,
+ bool WriteTables(OutputStream* out, const size_t file_offset);
+ size_t WriteCode(OutputStream* out, const size_t file_offset);
+ size_t WriteCodeDexFiles(OutputStream* out, const size_t file_offset, size_t relative_offset);
+ size_t WriteCodeDexFile(OutputStream* out, const size_t file_offset, size_t relative_offset,
size_t* oat_class_index, const DexFile& dex_file);
- size_t WriteCodeClassDef(OutputStream& out, const size_t file_offset, size_t relative_offset,
+ size_t WriteCodeClassDef(OutputStream* out, const size_t file_offset, size_t relative_offset,
size_t oat_class_index, const DexFile& dex_file,
const DexFile::ClassDef& class_def);
- size_t WriteCodeMethod(OutputStream& out, const size_t file_offset, size_t relative_offset,
+ size_t WriteCodeMethod(OutputStream* out, const size_t file_offset, size_t relative_offset,
size_t oat_class_index, size_t class_def_method_index,
size_t* method_offsets_index, bool is_static, uint32_t method_idx,
const DexFile& dex_file);
void ReportWriteFailure(const char* what, uint32_t method_idx, const DexFile& dex_file,
- OutputStream& out) const;
+ const OutputStream& out) const;
class OatDexFile {
public:
explicit OatDexFile(size_t offset, const DexFile& dex_file);
size_t SizeOf() const;
- void UpdateChecksum(OatHeader& oat_header) const;
- bool Write(OatWriter* oat_writer, OutputStream& out, const size_t file_offset) const;
+ void UpdateChecksum(OatHeader* oat_header) const;
+ bool Write(OatWriter* oat_writer, OutputStream* out, const size_t file_offset) const;
// Offset of start of OatDexFile from beginning of OatHeader. It is
// used to validate file position when writing.
@@ -153,8 +166,8 @@ class OatWriter {
size_t GetOatMethodOffsetsOffsetFromOatHeader(size_t class_def_method_index_) const;
size_t GetOatMethodOffsetsOffsetFromOatClass(size_t class_def_method_index_) const;
size_t SizeOf() const;
- void UpdateChecksum(OatHeader& oat_header) const;
- bool Write(OatWriter* oat_writer, OutputStream& out, const size_t file_offset) const;
+ void UpdateChecksum(OatHeader* oat_header) const;
+ bool Write(OatWriter* oat_writer, OutputStream* out, const size_t file_offset) const;
CompiledMethod* GetCompiledMethod(size_t class_def_method_index) const {
DCHECK(compiled_methods_ != NULL);
@@ -205,6 +218,8 @@ class OatWriter {
DISALLOW_COPY_AND_ASSIGN(OatClass);
};
+ std::vector<DebugInfo> method_info_;
+
const CompilerDriver* const compiler_driver_;
// note OatFile does not take ownership of the DexFiles
@@ -228,6 +243,7 @@ class OatWriter {
UniquePtr<const std::vector<uint8_t> > portable_imt_conflict_trampoline_;
UniquePtr<const std::vector<uint8_t> > portable_resolution_trampoline_;
UniquePtr<const std::vector<uint8_t> > portable_to_interpreter_bridge_;
+ UniquePtr<const std::vector<uint8_t> > quick_generic_jni_trampoline_;
UniquePtr<const std::vector<uint8_t> > quick_imt_conflict_trampoline_;
UniquePtr<const std::vector<uint8_t> > quick_resolution_trampoline_;
UniquePtr<const std::vector<uint8_t> > quick_to_interpreter_bridge_;
@@ -244,6 +260,7 @@ class OatWriter {
uint32_t size_portable_imt_conflict_trampoline_;
uint32_t size_portable_resolution_trampoline_;
uint32_t size_portable_to_interpreter_bridge_;
+ uint32_t size_quick_generic_jni_trampoline_;
uint32_t size_quick_imt_conflict_trampoline_;
uint32_t size_quick_resolution_trampoline_;
uint32_t size_quick_to_interpreter_bridge_;
diff --git a/compiler/utils/arena_bit_vector.cc b/compiler/utils/arena_bit_vector.cc
index 6f03524651..220ff14baa 100644
--- a/compiler/utils/arena_bit_vector.cc
+++ b/compiler/utils/arena_bit_vector.cc
@@ -42,6 +42,8 @@ class ArenaBitVectorAllocator : public Allocator {
ArenaBitVector::ArenaBitVector(ArenaAllocator* arena, unsigned int start_bits,
bool expandable, OatBitMapKind kind)
- : BitVector(start_bits, expandable, new (arena) ArenaBitVectorAllocator(arena)), kind_(kind) {}
+ : BitVector(start_bits, expandable, new (arena) ArenaBitVectorAllocator(arena)), kind_(kind) {
+ UNUSED(kind_);
+}
} // namespace art
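The UNUSED(kind_) touch keeps kind_ initialized for debugging while silencing the unused-private-field warning; ART's UNUSED macro is assumed to expand to a void cast along these lines:
// Assumed expansion; the void cast marks the field as deliberately unread.
#define UNUSED(x) ((void)(x))
class TaggedSketch {
 public:
  explicit TaggedSketch(int kind) : kind_(kind) { UNUSED(kind_); }
 private:
  int kind_;  // kept for debugging/crash dumps, never read by code
};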
diff --git a/compiler/utils/arm/managed_register_arm.cc b/compiler/utils/arm/managed_register_arm.cc
index 57c23059de..1fdc110dcf 100644
--- a/compiler/utils/arm/managed_register_arm.cc
+++ b/compiler/utils/arm/managed_register_arm.cc
@@ -21,16 +21,6 @@
namespace art {
namespace arm {
-// We need all registers for caching of locals.
-// Register R9 .. R15 are reserved.
-static const int kNumberOfAvailableCoreRegisters = (R8 - R0) + 1;
-static const int kNumberOfAvailableSRegisters = kNumberOfSRegisters;
-static const int kNumberOfAvailableDRegisters = kNumberOfDRegisters;
-static const int kNumberOfAvailableOverlappingDRegisters =
- kNumberOfOverlappingDRegisters;
-static const int kNumberOfAvailableRegisterPairs = kNumberOfRegisterPairs;
-
-
// Returns true if this managed-register overlaps the other managed-register.
bool ArmManagedRegister::Overlaps(const ArmManagedRegister& other) const {
if (IsNoRegister() || other.IsNoRegister()) return false;
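The constants deleted here, and in the mips and x86 counterparts below, were file-local and never referenced, which clang flags under -Wunused-const-variable and which -Werror then turns into a build break; removal is the whole fix. The warning is easy to reproduce:
// clang -Wunused-const-variable -Werror rejects this translation unit:
static const int kNeverRead = 42;  // error: unused variable 'kNeverRead'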
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index 5e7b2fad21..53f3375073 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -282,7 +282,9 @@ class AssemblerBuffer {
byte* cursor_;
byte* limit_;
AssemblerFixup* fixup_;
+#ifndef NDEBUG
bool fixups_processed_;
+#endif
// Head of linked list of slow paths
SlowPath* slow_path_;
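Making fixups_processed_ debug-only means its declaration and every use must sit under the same #ifndef NDEBUG guard, and release builds drop the field entirely. A minimal sketch of the pattern:
// Debug-only member: declaration and every use share the NDEBUG guard.
class AssemblerBufferSketch {
 public:
  void ProcessFixups() {
#ifndef NDEBUG
    fixups_processed_ = true;  // bookkeeping exists only in debug builds
#endif
  }
 private:
#ifndef NDEBUG
  bool fixups_processed_ = false;
#endif
};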
diff --git a/compiler/utils/mips/managed_register_mips.cc b/compiler/utils/mips/managed_register_mips.cc
index 195dafb0a1..5a8c0481a5 100644
--- a/compiler/utils/mips/managed_register_mips.cc
+++ b/compiler/utils/mips/managed_register_mips.cc
@@ -21,17 +21,6 @@
namespace art {
namespace mips {
-// These core registers are never available for allocation.
-static const Register kReservedCoreRegistersArray[] = { S0, S1 };
-
-// We need all registers for caching.
-static const int kNumberOfAvailableCoreRegisters = (S7 - T0) + 1;
-static const int kNumberOfAvailableFRegisters = kNumberOfFRegisters;
-static const int kNumberOfAvailableDRegisters = kNumberOfDRegisters;
-static const int kNumberOfAvailableOverlappingDRegisters =
- kNumberOfOverlappingDRegisters;
-static const int kNumberOfAvailableRegisterPairs = kNumberOfRegisterPairs;
-
bool MipsManagedRegister::Overlaps(const MipsManagedRegister& other) const {
if (IsNoRegister() || other.IsNoRegister()) return false;
CHECK(IsValidManagedRegister());
diff --git a/compiler/utils/x86/managed_register_x86.cc b/compiler/utils/x86/managed_register_x86.cc
index 4697d06136..7fae7a8b6f 100644
--- a/compiler/utils/x86/managed_register_x86.cc
+++ b/compiler/utils/x86/managed_register_x86.cc
@@ -21,19 +21,6 @@
namespace art {
namespace x86 {
-// These cpu registers are never available for allocation.
-static const Register kReservedCpuRegistersArray[] = { ESP };
-
-
-// We reduce the number of available registers for allocation in debug-code
-// mode in order to increase register pressure.
-
-// We need all registers for caching.
-static const int kNumberOfAvailableCpuRegisters = kNumberOfCpuRegisters;
-static const int kNumberOfAvailableXmmRegisters = kNumberOfXmmRegisters;
-static const int kNumberOfAvailableRegisterPairs = kNumberOfRegisterPairs;
-
-
// Define register pairs.
// This list must be kept in sync with the RegisterPair enum.
#define REGISTER_PAIR_LIST(P) \