-rw-r--r--  build/Android.common.mk | 8
-rw-r--r--  build/Android.gtest.mk | 12
-rw-r--r--  compiler/Android.mk | 15
-rw-r--r--  compiler/common_compiler_test.h | 11
-rw-r--r--  compiler/compiled_method.cc | 15
-rw-r--r--  compiler/compiled_method.h | 9
-rw-r--r--  compiler/compiler_backend.cc | 39
-rw-r--r--  compiler/compiler_backend.h | 34
-rw-r--r--  compiler/dex/bb_optimizations.h | 14
-rw-r--r--  compiler/dex/compiler_ir.h | 3
-rw-r--r--  compiler/dex/dex_to_dex_compiler.cc | 8
-rw-r--r--  compiler/dex/frontend.cc | 20
-rw-r--r--  compiler/dex/frontend.h | 2
-rw-r--r--  compiler/dex/local_value_numbering.cc | 73
-rw-r--r--  compiler/dex/local_value_numbering_test.cc | 75
-rw-r--r--  compiler/dex/mir_analysis.cc | 109
-rw-r--r--  compiler/dex/mir_field_info.cc | 124
-rw-r--r--  compiler/dex/mir_field_info.h | 211
-rw-r--r--  compiler/dex/mir_graph.cc | 4
-rw-r--r--  compiler/dex/mir_graph.h | 32
-rw-r--r--  compiler/dex/mir_optimization.cc | 7
-rw-r--r--  compiler/dex/pass.h | 4
-rw-r--r--  compiler/dex/pass_driver.cc | 1
-rw-r--r--  compiler/dex/portable/mir_to_gbc.cc | 1
-rw-r--r--  compiler/dex/portable/mir_to_gbc.h | 7
-rw-r--r--  compiler/dex/quick/arm/arm_lir.h | 23
-rw-r--r--  compiler/dex/quick/arm/assemble_arm.cc | 5
-rw-r--r--  compiler/dex/quick/arm/call_arm.cc | 11
-rw-r--r--  compiler/dex/quick/arm/codegen_arm.h | 2
-rw-r--r--  compiler/dex/quick/arm/fp_arm.cc | 60
-rw-r--r--  compiler/dex/quick/arm/int_arm.cc | 312
-rw-r--r--  compiler/dex/quick/arm/target_arm.cc | 39
-rw-r--r--  compiler/dex/quick/arm/utility_arm.cc | 15
-rw-r--r--  compiler/dex/quick/codegen_util.cc | 44
-rw-r--r--  compiler/dex/quick/dex_file_method_inliner.h | 11
-rw-r--r--  compiler/dex/quick/gen_common.cc | 314
-rw-r--r--  compiler/dex/quick/gen_invoke.cc | 136
-rw-r--r--  compiler/dex/quick/gen_loadstore.cc | 144
-rw-r--r--  compiler/dex/quick/mips/call_mips.cc | 10
-rw-r--r--  compiler/dex/quick/mips/codegen_mips.h | 2
-rw-r--r--  compiler/dex/quick/mips/fp_mips.cc | 20
-rw-r--r--  compiler/dex/quick/mips/int_mips.cc | 104
-rw-r--r--  compiler/dex/quick/mips/mips_lir.h | 24
-rw-r--r--  compiler/dex/quick/mips/target_mips.cc | 35
-rw-r--r--  compiler/dex/quick/mir_to_lir.cc | 78
-rw-r--r--  compiler/dex/quick/mir_to_lir.h | 17
-rw-r--r--  compiler/dex/quick/ralloc_util.cc | 130
-rw-r--r--  compiler/dex/quick/x86/call_x86.cc | 14
-rw-r--r--  compiler/dex/quick/x86/codegen_x86.h | 21
-rw-r--r--  compiler/dex/quick/x86/fp_x86.cc | 84
-rw-r--r--  compiler/dex/quick/x86/int_x86.cc | 331
-rw-r--r--  compiler/dex/quick/x86/target_x86.cc | 332
-rw-r--r--  compiler/dex/quick/x86/utility_x86.cc | 4
-rw-r--r--  compiler/dex/quick/x86/x86_lir.h | 23
-rw-r--r--  compiler/dex/reg_storage.h | 158
-rw-r--r--  compiler/dex/vreg_analysis.cc | 14
-rw-r--r--  compiler/driver/compiler_driver-inl.h | 165
-rw-r--r--  compiler/driver/compiler_driver.cc | 303
-rw-r--r--  compiler/driver/compiler_driver.h | 119
-rw-r--r--  compiler/driver/compiler_driver_test.cc | 2
-rw-r--r--  compiler/driver/compiler_options.h | 14
-rw-r--r--  compiler/elf_writer.cc | 7
-rw-r--r--  compiler/elf_writer.h | 17
-rw-r--r--  compiler/elf_writer_mclinker.cc | 42
-rw-r--r--  compiler/elf_writer_mclinker.h | 17
-rw-r--r--  compiler/elf_writer_quick.cc | 347
-rw-r--r--  compiler/elf_writer_quick.h | 28
-rw-r--r--  compiler/image_test.cc | 14
-rw-r--r--  compiler/image_writer.cc | 19
-rw-r--r--  compiler/image_writer.h | 5
-rw-r--r--  compiler/llvm/compiler_llvm.cc | 10
-rw-r--r--  compiler/llvm/compiler_llvm.h | 2
-rw-r--r--  compiler/llvm/gbc_expander.cc | 33
-rw-r--r--  compiler/llvm/llvm_compilation_unit.cc | 3
-rw-r--r--  compiler/oat_test.cc | 8
-rw-r--r--  compiler/oat_writer.cc | 184
-rw-r--r--  compiler/oat_writer.h | 41
-rw-r--r--  compiler/utils/arena_bit_vector.cc | 4
-rw-r--r--  compiler/utils/arm/managed_register_arm.cc | 10
-rw-r--r--  compiler/utils/assembler.h | 2
-rw-r--r--  compiler/utils/mips/managed_register_mips.cc | 11
-rw-r--r--  compiler/utils/x86/managed_register_x86.cc | 13
-rw-r--r--  dex2oat/dex2oat.cc | 16
-rw-r--r--  disassembler/Android.mk | 4
-rw-r--r--  runtime/Android.mk | 40
-rw-r--r--  runtime/arch/arm/entrypoints_init_arm.cc | 5
-rw-r--r--  runtime/arch/arm/quick_entrypoints_arm.S | 2
-rw-r--r--  runtime/arch/mips/entrypoints_init_mips.cc | 5
-rw-r--r--  runtime/arch/mips/quick_entrypoints_mips.S | 2
-rw-r--r--  runtime/arch/quick_alloc_entrypoints.cc | 108
-rw-r--r--  runtime/arch/x86/asm_support_x86.S | 64
-rw-r--r--  runtime/arch/x86/entrypoints_init_x86.cc | 5
-rw-r--r--  runtime/arch/x86/quick_entrypoints_x86.S | 5
-rw-r--r--  runtime/arch/x86_64/entrypoints_init_x86_64.cc | 5
-rw-r--r--  runtime/arch/x86_64/quick_entrypoints_x86_64.S | 6
-rw-r--r--  runtime/class_linker-inl.h | 1
-rw-r--r--  runtime/class_linker.cc | 15
-rw-r--r--  runtime/class_linker.h | 5
-rw-r--r--  runtime/class_reference.h | 1
-rw-r--r--  runtime/compiler_callbacks.h | 1
-rw-r--r--  runtime/elf_file.cc | 358
-rw-r--r--  runtime/elf_file.h | 13
-rw-r--r--  runtime/entrypoints/entrypoint_utils.h | 9
-rw-r--r--  runtime/entrypoints/quick/quick_alloc_entrypoints.cc | 88
-rw-r--r--  runtime/entrypoints/quick/quick_alloc_entrypoints.h | 44
-rw-r--r--  runtime/entrypoints/quick/quick_entrypoints.h | 1
-rw-r--r--  runtime/entrypoints/quick/quick_jni_entrypoints.cc | 2
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc | 9
-rw-r--r--  runtime/gc/accounting/card_table-inl.h | 29
-rw-r--r--  runtime/gc/allocator/rosalloc.h | 3
-rw-r--r--  runtime/gc/collector/mark_sweep.cc | 2
-rw-r--r--  runtime/gc/collector/semi_space.cc | 30
-rw-r--r--  runtime/gc/heap.cc | 113
-rw-r--r--  runtime/gc/heap.h | 23
-rw-r--r--  runtime/gc/space/bump_pointer_space.cc | 8
-rw-r--r--  runtime/gc/space/bump_pointer_space.h | 7
-rw-r--r--  runtime/gc/space/dlmalloc_space.cc | 5
-rw-r--r--  runtime/gc/space/dlmalloc_space.h | 1
-rw-r--r--  runtime/gc/space/dlmalloc_space_base_test.cc (renamed from runtime/gc/space/dlmalloc_space_test.cc) | 2
-rw-r--r--  runtime/gc/space/dlmalloc_space_random_test.cc | 34
-rw-r--r--  runtime/gc/space/dlmalloc_space_static_test.cc | 34
-rw-r--r--  runtime/gc/space/rosalloc_space.cc | 5
-rw-r--r--  runtime/gc/space/rosalloc_space.h | 1
-rw-r--r--  runtime/gc/space/rosalloc_space_base_test.cc (renamed from runtime/gc/space/rosalloc_space_test.cc) | 2
-rw-r--r--  runtime/gc/space/rosalloc_space_random_test.cc | 34
-rw-r--r--  runtime/gc/space/rosalloc_space_static_test.cc | 34
-rw-r--r--  runtime/gc/space/space.h | 3
-rw-r--r--  runtime/gc/space/space_test.h | 68
-rw-r--r--  runtime/gc/space/zygote_space.cc | 4
-rw-r--r--  runtime/gc/space/zygote_space.h | 3
-rw-r--r--  runtime/instrumentation.cc | 31
-rw-r--r--  runtime/instrumentation.h | 12
-rw-r--r--  runtime/interpreter/interpreter.cc | 34
-rw-r--r--  runtime/interpreter/interpreter_common.cc | 5
-rw-r--r--  runtime/interpreter/interpreter_common.h | 8
-rw-r--r--  runtime/jdwp/jdwp.h | 4
-rw-r--r--  runtime/jni_internal.cc | 31
-rw-r--r--  runtime/jni_internal.h | 3
-rw-r--r--  runtime/mirror/class-inl.h | 3
-rw-r--r--  runtime/mirror/class.h | 5
-rw-r--r--  runtime/mirror/object-inl.h | 5
-rw-r--r--  runtime/native/dalvik_system_VMRuntime.cc | 39
-rw-r--r--  runtime/oat.cc | 26
-rw-r--r--  runtime/oat.h | 4
-rw-r--r--  runtime/parsed_options.cc | 739
-rw-r--r--  runtime/parsed_options.h | 105
-rw-r--r--  runtime/parsed_options_test.cc (renamed from runtime/runtime_test.cc) | 8
-rw-r--r--  runtime/runtime.cc | 525
-rw-r--r--  runtime/runtime.h | 82
-rw-r--r--  runtime/runtime_android.cc | 52
-rw-r--r--  runtime/runtime_linux.cc | 14
-rw-r--r--  runtime/thread.cc | 4
-rw-r--r--  runtime/thread.h | 9
-rw-r--r--  runtime/trace.cc | 6
-rw-r--r--  runtime/trace.h | 6
-rw-r--r--  runtime/utils.cc | 4
-rw-r--r--  runtime/zip_archive_test.cc | 4
-rw-r--r--  test/083-compiler-regressions/expected.txt | 1
-rw-r--r--  test/083-compiler-regressions/src/Main.java | 14
-rwxr-xr-x  tools/generate-operator-out.py | 2
160 files changed, 5325 insertions(+), 2494 deletions(-)
diff --git a/build/Android.common.mk b/build/Android.common.mk
index 704da68df2..28546e9283 100644
--- a/build/Android.common.mk
+++ b/build/Android.common.mk
@@ -91,10 +91,12 @@ LLVM_ROOT_PATH := external/llvm
# Don't fail a dalvik minimal host build.
-include $(LLVM_ROOT_PATH)/llvm.mk
-# Clang build.
-# ART_TARGET_CLANG := true
+# Clang build support.
+ART_TARGET_CLANG := false
ifeq ($(HOST_OS),darwin)
-ART_HOST_CLANG := true
+ ART_HOST_CLANG := true
+else
+ ART_HOST_CLANG := false
endif
# directory used for dalvik-cache on device
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 2ddd09ec3e..6907603292 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -36,8 +36,12 @@ RUNTIME_GTEST_COMMON_SRC_FILES := \
runtime/exception_test.cc \
runtime/gc/accounting/space_bitmap_test.cc \
runtime/gc/heap_test.cc \
- runtime/gc/space/dlmalloc_space_test.cc \
- runtime/gc/space/rosalloc_space_test.cc \
+ runtime/gc/space/dlmalloc_space_base_test.cc \
+ runtime/gc/space/dlmalloc_space_static_test.cc \
+ runtime/gc/space/dlmalloc_space_random_test.cc \
+ runtime/gc/space/rosalloc_space_base_test.cc \
+ runtime/gc/space/rosalloc_space_static_test.cc \
+ runtime/gc/space/rosalloc_space_random_test.cc \
runtime/gc/space/large_object_space_test.cc \
runtime/gtest_test.cc \
runtime/indenter_test.cc \
@@ -47,8 +51,8 @@ RUNTIME_GTEST_COMMON_SRC_FILES := \
runtime/mem_map_test.cc \
runtime/mirror/dex_cache_test.cc \
runtime/mirror/object_test.cc \
+ runtime/parsed_options_test.cc \
runtime/reference_table_test.cc \
- runtime/runtime_test.cc \
runtime/thread_pool_test.cc \
runtime/transaction_test.cc \
runtime/utils_test.cc \
@@ -155,7 +159,6 @@ define build-art-test
LOCAL_SHARED_LIBRARIES += libdl libicuuc libicui18n libnativehelper libz libcutils
LOCAL_STATIC_LIBRARIES += libgtest
LOCAL_MODULE_PATH := $(ART_NATIVETEST_OUT)
- include $(LLVM_DEVICE_BUILD_MK)
include $(BUILD_EXECUTABLE)
art_gtest_exe := $$(LOCAL_MODULE_PATH)/$$(LOCAL_MODULE)
ART_TARGET_GTEST_EXECUTABLES += $$(art_gtest_exe)
@@ -168,7 +171,6 @@ define build-art-test
# Mac OS complains about unresolved symbols if you don't include this.
LOCAL_WHOLE_STATIC_LIBRARIES := libgtest_host
endif
- include $(LLVM_HOST_BUILD_MK)
include $(BUILD_HOST_EXECUTABLE)
art_gtest_exe := $(HOST_OUT_EXECUTABLES)/$$(LOCAL_MODULE)
ART_HOST_GTEST_EXECUTABLES += $$(art_gtest_exe)
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 8f840cc3f5..2f785ce5d7 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -50,6 +50,7 @@ LIBART_COMPILER_SRC_FILES := \
dex/quick/x86/utility_x86.cc \
dex/dex_to_dex_compiler.cc \
dex/mir_dataflow.cc \
+ dex/mir_field_info.cc \
dex/mir_optimization.cc \
dex/pass_driver.cc \
dex/bb_optimizations.cc \
@@ -106,7 +107,7 @@ endif
LIBART_COMPILER_CFLAGS :=
ifeq ($(ART_USE_PORTABLE_COMPILER),true)
-LIBART_COMPILER_SRC_FILES +=
+LIBART_COMPILER_SRC_FILES += \
dex/portable/mir_to_gbc.cc \
elf_writer_mclinker.cc \
jni/portable/jni_compiler.cc \
@@ -120,11 +121,12 @@ LIBART_COMPILER_SRC_FILES +=
llvm/runtime_support_builder.cc \
llvm/runtime_support_builder_arm.cc \
llvm/runtime_support_builder_x86.cc
- LIBART_COMPILER_CFLAGS += -DART_USE_PORTABLE_COMPILER=1
+LIBART_COMPILER_CFLAGS += -DART_USE_PORTABLE_COMPILER=1
endif
LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES := \
- dex/compiler_enums.h
+ dex/compiler_enums.h \
+ dex/quick/dex_file_method_inliner.h
# $(1): target or host
# $(2): ndebug or debug
@@ -211,12 +213,15 @@ $$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PAT
ifeq ($(TARGET_ARCH),arm64)
$$(info TODOAArch64: $$(LOCAL_PATH)/Android.mk Add Arm64 specific MCLinker libraries)
endif # TARGET_ARCH != arm64
+ include $(LLVM_DEVICE_BUILD_MK)
else # host
LOCAL_STATIC_LIBRARIES += libmcldARMInfo libmcldARMTarget
LOCAL_STATIC_LIBRARIES += libmcldX86Info libmcldX86Target
LOCAL_STATIC_LIBRARIES += libmcldMipsInfo libmcldMipsTarget
+ include $(LLVM_HOST_BUILD_MK)
endif
LOCAL_STATIC_LIBRARIES += libmcldCore libmcldObject libmcldADT libmcldFragment libmcldTarget libmcldCodeGen libmcldLDVariant libmcldMC libmcldSupport libmcldLD
+ include $(LLVM_GEN_INTRINSICS_MK)
endif
LOCAL_C_INCLUDES += $(ART_C_INCLUDES) art/runtime
@@ -228,13 +233,9 @@ $$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PAT
LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
ifeq ($$(art_target_or_host),target)
LOCAL_SHARED_LIBRARIES += libcutils
- include $(LLVM_GEN_INTRINSICS_MK)
- include $(LLVM_DEVICE_BUILD_MK)
include $(BUILD_SHARED_LIBRARY)
else # host
LOCAL_STATIC_LIBRARIES += libcutils
- include $(LLVM_GEN_INTRINSICS_MK)
- include $(LLVM_HOST_BUILD_MK)
include $(BUILD_HOST_SHARED_LIBRARY)
endif
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index 0999d09e71..3bdc95ea54 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -203,8 +203,11 @@ class CommonCompilerTest : public CommonRuntimeTest {
method->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge);
} else {
// No code? You must mean to go into the interpreter.
- const void* method_code = kUsePortableCompiler ? GetPortableToInterpreterBridge()
- : GetQuickToInterpreterBridge();
+ // Or the generic JNI...
+ const void* method_code = method->IsNative() ? GetQuickGenericJniTrampoline()
+ : (kUsePortableCompiler
+ ? GetPortableToInterpreterBridge()
+ : GetQuickToInterpreterBridge());
OatFile::OatMethod oat_method = CreateOatMethod(method_code,
kStackAlignment,
0,
@@ -240,7 +243,7 @@ class CommonCompilerTest : public CommonRuntimeTest {
#if GCC_VERSION >= 40303
__builtin___clear_cache(reinterpret_cast<void*>(base), reinterpret_cast<void*>(base + len));
#else
- LOG(FATAL) << "UNIMPLEMENTED: cache flush";
+ LOG(WARNING) << "UNIMPLEMENTED: cache flush";
#endif
}
@@ -353,7 +356,7 @@ class CommonCompilerTest : public CommonRuntimeTest {
CHECK(method != nullptr);
TimingLogger timings("CommonTest::CompileMethod", false, false);
timings.StartSplit("CompileOne");
- compiler_driver_->CompileOne(method, timings);
+ compiler_driver_->CompileOne(method, &timings);
MakeExecutable(method);
timings.EndSplit();
}
diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc
index f6d724ab56..d884bc0ef8 100644
--- a/compiler/compiled_method.cc
+++ b/compiler/compiled_method.cc
@@ -153,12 +153,14 @@ CompiledMethod::CompiledMethod(CompilerDriver& driver,
const uint32_t fp_spill_mask,
const std::vector<uint8_t>& mapping_table,
const std::vector<uint8_t>& vmap_table,
- const std::vector<uint8_t>& native_gc_map)
+ const std::vector<uint8_t>& native_gc_map,
+ const std::vector<uint8_t>* cfi_info)
: CompiledCode(&driver, instruction_set, quick_code), frame_size_in_bytes_(frame_size_in_bytes),
core_spill_mask_(core_spill_mask), fp_spill_mask_(fp_spill_mask),
mapping_table_(driver.DeduplicateMappingTable(mapping_table)),
vmap_table_(driver.DeduplicateVMapTable(vmap_table)),
- gc_map_(driver.DeduplicateGCMap(native_gc_map)) {
+ gc_map_(driver.DeduplicateGCMap(native_gc_map)),
+ cfi_info_(driver.DeduplicateCFIInfo(cfi_info)) {
}
CompiledMethod::CompiledMethod(CompilerDriver& driver,
@@ -169,10 +171,11 @@ CompiledMethod::CompiledMethod(CompilerDriver& driver,
const uint32_t fp_spill_mask)
: CompiledCode(&driver, instruction_set, code),
frame_size_in_bytes_(frame_size_in_bytes),
- core_spill_mask_(core_spill_mask), fp_spill_mask_(fp_spill_mask) {
- mapping_table_ = driver.DeduplicateMappingTable(std::vector<uint8_t>());
- vmap_table_ = driver.DeduplicateVMapTable(std::vector<uint8_t>());
- gc_map_ = driver.DeduplicateGCMap(std::vector<uint8_t>());
+ core_spill_mask_(core_spill_mask), fp_spill_mask_(fp_spill_mask),
+ mapping_table_(driver.DeduplicateMappingTable(std::vector<uint8_t>())),
+ vmap_table_(driver.DeduplicateVMapTable(std::vector<uint8_t>())),
+ gc_map_(driver.DeduplicateGCMap(std::vector<uint8_t>())),
+ cfi_info_(nullptr) {
}
// Constructs a CompiledMethod for the Portable compiler.
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index 611230509a..90ae6eeae8 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -110,7 +110,8 @@ class CompiledMethod : public CompiledCode {
const uint32_t fp_spill_mask,
const std::vector<uint8_t>& mapping_table,
const std::vector<uint8_t>& vmap_table,
- const std::vector<uint8_t>& native_gc_map);
+ const std::vector<uint8_t>& native_gc_map,
+ const std::vector<uint8_t>* cfi_info);
// Constructs a CompiledMethod for the QuickJniCompiler.
CompiledMethod(CompilerDriver& driver,
@@ -157,6 +158,10 @@ class CompiledMethod : public CompiledCode {
return *gc_map_;
}
+ const std::vector<uint8_t>* GetCFIInfo() const {
+ return cfi_info_;
+ }
+
private:
// For quick code, the size of the activation used by the code.
const size_t frame_size_in_bytes_;
@@ -172,6 +177,8 @@ class CompiledMethod : public CompiledCode {
// For quick code, a map keyed by native PC indices to bitmaps describing what dalvik registers
// are live. For portable code, the key is a dalvik PC.
std::vector<uint8_t>* gc_map_;
+ // For quick code, a FDE entry for the debug_frame section.
+ std::vector<uint8_t>* cfi_info_;
};
} // namespace art
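
The constructor changes above route the new cfi_info through the same deduplication path as the mapping table, vmap table and GC map. For reference, a minimal standalone sketch of that interning idea (generic names and assumed behavior, not the actual CompilerDriver API):

#include <cstdint>
#include <set>
#include <vector>

// Hypothetical interner: equal byte vectors are stored once and shared.
class Deduplicator {
 public:
  // Returns a canonical pointer for the given contents, so identical
  // per-method tables (mapping/vmap/GC-map/CFI data) share storage.
  const std::vector<uint8_t>* Intern(const std::vector<uint8_t>& data) {
    return &*storage_.insert(data).first;
  }

 private:
  std::set<std::vector<uint8_t>> storage_;
};
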
diff --git a/compiler/compiler_backend.cc b/compiler/compiler_backend.cc
index eaa39f83c1..0afa665eb7 100644
--- a/compiler/compiler_backend.cc
+++ b/compiler/compiler_backend.cc
@@ -83,6 +83,9 @@ static CompiledMethod* TryCompileWithSeaIR(art::CompilerDriver& compiler,
}
+// Hack for CFI CIE initialization
+extern std::vector<uint8_t>* X86CFIInitialization();
+
class QuickBackend : public CompilerBackend {
public:
QuickBackend() : CompilerBackend(100) {}
@@ -135,10 +138,11 @@ class QuickBackend : public CompilerBackend {
}
bool WriteElf(art::File* file,
- OatWriter& oat_writer,
+ OatWriter* oat_writer,
const std::vector<const art::DexFile*>& dex_files,
const std::string& android_root,
bool is_host, const CompilerDriver& driver) const
+ OVERRIDE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return art::ElfWriterQuick::Create(file, oat_writer, dex_files, android_root, is_host, driver);
}
@@ -165,11 +169,27 @@ class QuickBackend : public CompilerBackend {
bool set_max = cu->mir_graph->SetMaxAvailableNonSpecialCompilerTemps(max_temps);
CHECK(set_max);
}
- return mir_to_lir;;
+ return mir_to_lir;
}
void InitCompilationUnit(CompilationUnit& cu) const {}
+ /*
+ * @brief Generate and return Dwarf CFI initialization, if supported by the
+ * backend.
+ * @param driver CompilerDriver for this compile.
+ * @returns nullptr if not supported by backend or a vector of bytes for CFI DWARF
+ * information.
+ * @note This is used for backtrace information in generated code.
+ */
+ std::vector<uint8_t>* GetCallFrameInformationInitialization(const CompilerDriver& driver) const
+ OVERRIDE {
+ if (driver.GetInstructionSet() == kX86) {
+ return X86CFIInitialization();
+ }
+ return nullptr;
+ }
+
private:
DISALLOW_COPY_AND_ASSIGN(QuickBackend);
};
@@ -249,11 +269,12 @@ class LLVMBackend : public CompilerBackend {
}
bool WriteElf(art::File* file,
- OatWriter& oat_writer,
+ OatWriter* oat_writer,
const std::vector<const art::DexFile*>& dex_files,
const std::string& android_root,
bool is_host, const CompilerDriver& driver) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ OVERRIDE
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return art::ElfWriterMclinker::Create(
file, oat_writer, dex_files, android_root, is_host, driver);
}
@@ -271,15 +292,17 @@ class LLVMBackend : public CompilerBackend {
(1 << kSuppressExceptionEdges);
}
- bool isPortable() const { return true; }
+ bool IsPortable() const OVERRIDE {
+ return true;
+ }
- void SetBitcodeFileName(std::string const& filename) {
- typedef void (*SetBitcodeFileNameFn)(CompilerDriver&, std::string const&);
+ void SetBitcodeFileName(const CompilerDriver& driver, const std::string& filename) {
+ typedef void (*SetBitcodeFileNameFn)(const CompilerDriver&, const std::string&);
SetBitcodeFileNameFn set_bitcode_file_name =
reinterpret_cast<SetBitcodeFileNameFn>(compilerLLVMSetBitcodeFileName);
- set_bitcode_file_name(*this, filename);
+ set_bitcode_file_name(driver, filename);
}
private:
diff --git a/compiler/compiler_backend.h b/compiler/compiler_backend.h
index 01a69afc89..b473806bba 100644
--- a/compiler/compiler_backend.h
+++ b/compiler/compiler_backend.h
@@ -23,7 +23,7 @@
namespace art {
class Backend;
-class CompilationUnit;
+struct CompilationUnit;
class CompilerDriver;
class CompiledMethod;
class MIRGraph;
@@ -40,8 +40,9 @@ class CompilerBackend {
kPortable
};
- explicit CompilerBackend(int warning)
- : maximum_compilation_time_before_warning_(warning) {}
+ explicit CompilerBackend(uint64_t warning)
+ : maximum_compilation_time_before_warning_(warning) {
+ }
static CompilerBackend* Create(Kind kind);
@@ -49,7 +50,7 @@ class CompilerBackend {
virtual void UnInit(CompilerDriver& driver) const = 0;
- virtual CompiledMethod* Compile(CompilerDriver& compiler,
+ virtual CompiledMethod* Compile(CompilerDriver& driver,
const DexFile::CodeItem* code_item,
uint32_t access_flags,
InvokeType invoke_type,
@@ -66,7 +67,7 @@ class CompilerBackend {
virtual uintptr_t GetEntryPointOf(mirror::ArtMethod* method) const = 0;
virtual bool WriteElf(art::File* file,
- OatWriter& oat_writer,
+ OatWriter* oat_writer,
const std::vector<const art::DexFile*>& dex_files,
const std::string& android_root,
bool is_host, const CompilerDriver& driver) const
@@ -79,8 +80,12 @@ class CompilerBackend {
return maximum_compilation_time_before_warning_;
}
- virtual bool IsPortable() const { return false; }
- void SetBitcodeFileName(std::string const& filename) {
+ virtual bool IsPortable() const {
+ return false;
+ }
+
+ void SetBitcodeFileName(const CompilerDriver& driver, const std::string& filename) {
+ UNUSED(driver);
UNUSED(filename);
}
@@ -88,8 +93,21 @@ class CompilerBackend {
virtual ~CompilerBackend() {}
+ /*
+ * @brief Generate and return Dwarf CFI initialization, if supported by the
+ * backend.
+ * @param driver CompilerDriver for this compile.
+ * @returns nullptr if not supported by backend or a vector of bytes for CFI DWARF
+ * information.
+ * @note This is used for backtrace information in generated code.
+ */
+ virtual std::vector<uint8_t>* GetCallFrameInformationInitialization(const CompilerDriver& driver)
+ const {
+ return nullptr;
+ }
+
private:
- uint64_t maximum_compilation_time_before_warning_;
+ const uint64_t maximum_compilation_time_before_warning_;
DISALLOW_COPY_AND_ASSIGN(CompilerBackend);
};
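
The GetCallFrameInformationInitialization() method added above is an optional-capability hook: the base backend answers "not supported" with nullptr and a concrete backend overrides it for the instruction sets it can describe. A minimal sketch under those assumptions (illustrative names, not the ART API):

#include <cstdint>
#include <vector>

enum class Isa { kX86, kArm, kMips };

class Backend {
 public:
  virtual ~Backend() {}
  // Default: no call-frame information; callers must handle nullptr.
  virtual std::vector<uint8_t>* GetCFIInitialization(Isa) const {
    return nullptr;
  }
};

class QuickLikeBackend : public Backend {
 public:
  std::vector<uint8_t>* GetCFIInitialization(Isa isa) const override {
    if (isa == Isa::kX86) {
      // The patch delegates to X86CFIInitialization(); stubbed here.
      return new std::vector<uint8_t>();  // Caller takes ownership.
    }
    return nullptr;  // Other instruction sets keep the default answer.
  }
};
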
diff --git a/compiler/dex/bb_optimizations.h b/compiler/dex/bb_optimizations.h
index 1286a8e52e..bd7c40ba5b 100644
--- a/compiler/dex/bb_optimizations.h
+++ b/compiler/dex/bb_optimizations.h
@@ -23,6 +23,20 @@
namespace art {
/**
+ * @class CacheFieldLoweringInfo
+ * @brief Cache the lowering info for fields used by IGET/IPUT/SGET/SPUT insns.
+ */
+class CacheFieldLoweringInfo : public Pass {
+ public:
+ CacheFieldLoweringInfo() : Pass("CacheFieldLoweringInfo", kNoNodes) {
+ }
+
+ void Start(CompilationUnit* cUnit) const {
+ cUnit->mir_graph->DoCacheFieldLoweringInfo();
+ }
+};
+
+/**
* @class CodeLayout
* @brief Perform the code layout pass.
*/
diff --git a/compiler/dex/compiler_ir.h b/compiler/dex/compiler_ir.h
index 8447d23ddc..ee880417ac 100644
--- a/compiler/dex/compiler_ir.h
+++ b/compiler/dex/compiler_ir.h
@@ -63,8 +63,9 @@ struct CompilationUnit {
bool verbose;
const CompilerBackend* compiler_backend;
InstructionSet instruction_set;
+ bool target64;
- const InstructionSetFeatures& GetInstructionSetFeatures() {
+ InstructionSetFeatures GetInstructionSetFeatures() {
return compiler_driver->GetInstructionSetFeatures();
}
// TODO: much of this info available elsewhere. Go to the original source?
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index ff8fea0f88..b9f9437c95 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -208,21 +208,21 @@ void DexCompiler::CompileInstanceFieldAccess(Instruction* inst,
return;
}
uint32_t field_idx = inst->VRegC_22c();
- int field_offset;
+ MemberOffset field_offset(0u);
bool is_volatile;
bool fast_path = driver_.ComputeInstanceFieldInfo(field_idx, &unit_, is_put,
&field_offset, &is_volatile);
- if (fast_path && !is_volatile && IsUint(16, field_offset)) {
+ if (fast_path && !is_volatile && IsUint(16, field_offset.Int32Value())) {
VLOG(compiler) << "Quickening " << Instruction::Name(inst->Opcode())
<< " to " << Instruction::Name(new_opcode)
<< " by replacing field index " << field_idx
- << " by field offset " << field_offset
+ << " by field offset " << field_offset.Int32Value()
<< " at dex pc " << StringPrintf("0x%x", dex_pc) << " in method "
<< PrettyMethod(unit_.GetDexMethodIndex(), GetDexFile(), true);
// We are modifying 4 consecutive bytes.
inst->SetOpcode(new_opcode);
// Replace field index by field offset.
- inst->SetVRegC_22c(static_cast<uint16_t>(field_offset));
+ inst->SetVRegC_22c(static_cast<uint16_t>(field_offset.Int32Value()));
}
}
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index 6800f7b2a4..b55b4715eb 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -128,7 +128,7 @@ void CompilationUnit::EndTiming() {
}
}
-static CompiledMethod* CompileMethod(CompilerDriver& compiler,
+static CompiledMethod* CompileMethod(CompilerDriver& driver,
CompilerBackend* compiler_backend,
const DexFile::CodeItem* code_item,
uint32_t access_flags, InvokeType invoke_type,
@@ -143,12 +143,14 @@ static CompiledMethod* CompileMethod(CompilerDriver& compiler,
}
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- CompilationUnit cu(&compiler.GetArenaPool());
+ CompilationUnit cu(driver.GetArenaPool());
- cu.compiler_driver = &compiler;
+ cu.compiler_driver = &driver;
cu.class_linker = class_linker;
- cu.instruction_set = compiler.GetInstructionSet();
+ cu.instruction_set = driver.GetInstructionSet();
+ cu.target64 = cu.instruction_set == kX86_64;
cu.compiler_backend = compiler_backend;
+ // TODO: x86_64 is not yet implemented.
DCHECK((cu.instruction_set == kThumb2) ||
(cu.instruction_set == kX86) ||
(cu.instruction_set == kMips));
@@ -216,8 +218,8 @@ static CompiledMethod* CompileMethod(CompilerDriver& compiler,
}
/* Create the pass driver and launch it */
- PassDriver driver(&cu);
- driver.Launch();
+ PassDriver pass_driver(&cu);
+ pass_driver.Launch();
if (cu.enable_debug & (1 << kDebugDumpCheckStats)) {
cu.mir_graph->DumpCheckStats();
@@ -257,9 +259,9 @@ static CompiledMethod* CompileMethod(CompilerDriver& compiler,
}
cu.EndTiming();
- compiler.GetTimingsLogger().Start();
- compiler.GetTimingsLogger().AddLogger(cu.timings);
- compiler.GetTimingsLogger().End();
+ driver.GetTimingsLogger()->Start();
+ driver.GetTimingsLogger()->AddLogger(cu.timings);
+ driver.GetTimingsLogger()->End();
return result;
}
diff --git a/compiler/dex/frontend.h b/compiler/dex/frontend.h
index 8ce12067ee..22a7b8cfb0 100644
--- a/compiler/dex/frontend.h
+++ b/compiler/dex/frontend.h
@@ -105,7 +105,7 @@ class LLVMInfo {
UniquePtr<art::llvm::IRBuilder> ir_builder_;
};
-struct CompiledMethod;
+class CompiledMethod;
class CompilerDriver;
} // namespace art
diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc
index a3ea034902..61c676784f 100644
--- a/compiler/dex/local_value_numbering.cc
+++ b/compiler/dex/local_value_numbering.cc
@@ -16,6 +16,7 @@
#include "local_value_numbering.h"
+#include "mir_field_info.h"
#include "mir_graph.h"
namespace art {
@@ -534,16 +535,24 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
case Instruction::IGET_BYTE:
case Instruction::IGET_CHAR:
case Instruction::IGET_SHORT: {
+ uint16_t type = opcode - Instruction::IGET;
uint16_t base = GetOperandValue(mir->ssa_rep->uses[0]);
HandleNullCheck(mir, base);
+ const MirFieldInfo& field_info = cu_->mir_graph->GetIFieldLoweringInfo(mir);
uint16_t memory_version;
uint16_t field_id;
- // TODO: all gets treated as volatile.
- // Volatile fields always get a new memory version; field id is irrelevant.
- // Unresolved fields are always marked as volatile and handled the same way here.
- field_id = 0u;
- memory_version = next_memory_version_;
- ++next_memory_version_;
+ if (!field_info.IsResolved() || field_info.IsVolatile()) {
+ // Volatile fields always get a new memory version; field id is irrelevant.
+ // Unresolved fields may be volatile, so handle them as such to be safe.
+ field_id = 0u;
+ memory_version = next_memory_version_;
+ ++next_memory_version_;
+ } else {
+ DCHECK(field_info.IsResolved());
+ field_id = GetFieldId(field_info.DeclaringDexFile(), field_info.DeclaringFieldIndex());
+ memory_version = std::max(unresolved_ifield_version_[type],
+ GetMemoryVersion(base, field_id, type));
+ }
if (opcode == Instruction::IGET_WIDE) {
res = LookupValue(Instruction::IGET_WIDE, base, field_id, memory_version);
SetOperandValueWide(mir->ssa_rep->defs[0], res);
@@ -567,10 +576,18 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
int base_reg = (opcode == Instruction::IPUT_WIDE) ? 2 : 1;
uint16_t base = GetOperandValue(mir->ssa_rep->uses[base_reg]);
HandleNullCheck(mir, base);
- // TODO: all puts treated as unresolved.
- // Unresolved fields always alias with everything of the same type.
- unresolved_ifield_version_[type] = next_memory_version_;
- ++next_memory_version_;
+ const MirFieldInfo& field_info = cu_->mir_graph->GetIFieldLoweringInfo(mir);
+ if (!field_info.IsResolved()) {
+ // Unresolved fields always alias with everything of the same type.
+ unresolved_ifield_version_[type] = next_memory_version_;
+ ++next_memory_version_;
+ } else if (field_info.IsVolatile()) {
+ // Nothing to do, resolved volatile fields always get a new memory version anyway and
+ // can't alias with resolved non-volatile fields.
+ } else {
+ AdvanceMemoryVersion(base, GetFieldId(field_info.DeclaringDexFile(),
+ field_info.DeclaringFieldIndex()), type);
+ }
}
break;
@@ -581,14 +598,22 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
case Instruction::SGET_BYTE:
case Instruction::SGET_CHAR:
case Instruction::SGET_SHORT: {
+ uint16_t type = opcode - Instruction::SGET;
+ const MirFieldInfo& field_info = cu_->mir_graph->GetSFieldLoweringInfo(mir);
uint16_t memory_version;
uint16_t field_id;
- // TODO: all gets treated as volatile.
- // Volatile fields always get a new memory version; field id is irrelevant.
- // Unresolved fields are always marked as volatile and handled the same way here.
- field_id = 0u;
- memory_version = next_memory_version_;
- ++next_memory_version_;
+ if (!field_info.IsResolved() || field_info.IsVolatile()) {
+ // Volatile fields always get a new memory version; field id is irrelevant.
+ // Unresolved fields may be volatile, so handle them as such to be safe.
+ field_id = 0u;
+ memory_version = next_memory_version_;
+ ++next_memory_version_;
+ } else {
+ DCHECK(field_info.IsResolved());
+ field_id = GetFieldId(field_info.DeclaringDexFile(), field_info.DeclaringFieldIndex());
+ memory_version = std::max(unresolved_sfield_version_[type],
+ GetMemoryVersion(NO_VALUE, field_id, type));
+ }
if (opcode == Instruction::SGET_WIDE) {
res = LookupValue(Instruction::SGET_WIDE, NO_VALUE, field_id, memory_version);
SetOperandValueWide(mir->ssa_rep->defs[0], res);
@@ -609,10 +634,18 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
case Instruction::SPUT_CHAR:
case Instruction::SPUT_SHORT: {
uint16_t type = opcode - Instruction::SPUT;
- // TODO: all puts treated as unresolved.
- // Unresolved fields always alias with everything of the same type.
- unresolved_sfield_version_[type] = next_memory_version_;
- ++next_memory_version_;
+ const MirFieldInfo& field_info = cu_->mir_graph->GetSFieldLoweringInfo(mir);
+ if (!field_info.IsResolved()) {
+ // Unresolved fields always alias with everything of the same type.
+ unresolved_sfield_version_[type] = next_memory_version_;
+ ++next_memory_version_;
+ } else if (field_info.IsVolatile()) {
+ // Nothing to do, resolved volatile fields always get a new memory version anyway and
+ // can't alias with resolved non-volatile fields.
+ } else {
+ AdvanceMemoryVersion(NO_VALUE, GetFieldId(field_info.DeclaringDexFile(),
+ field_info.DeclaringFieldIndex()), type);
+ }
}
break;
}
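
The hunks above replace "treat every field access as volatile" with per-field memory versions: a volatile or unresolved access always takes a fresh version, while a resolved non-volatile access reuses a version keyed by base, field id and type, clamped by the last unresolved store of the same type. A minimal standalone model of that scheme (illustrative names, not the LocalValueNumbering API):

#include <algorithm>
#include <cstdint>
#include <map>
#include <tuple>

class MemoryVersions {
 public:
  // Volatile or unresolved accesses always observe a fresh version.
  uint16_t FreshVersion() { return next_version_++; }

  // A resolved non-volatile get reuses the version recorded for
  // (base, field_id, type), but cannot be hoisted above an earlier
  // unresolved put of the same type, hence the std::max().
  uint16_t GetVersion(uint16_t base, uint16_t field_id, uint16_t type) {
    auto key = std::make_tuple(base, field_id, type);
    auto it = versions_.find(key);
    if (it == versions_.end()) {
      it = versions_.emplace(key, next_version_++).first;
    }
    return std::max(unresolved_version_[type], it->second);
  }

  // A put to a resolved non-volatile field advances only its own slot.
  void Advance(uint16_t base, uint16_t field_id, uint16_t type) {
    versions_[std::make_tuple(base, field_id, type)] = next_version_++;
  }

  // A put to an unresolved field may alias anything of the same type.
  void AdvanceUnresolved(uint16_t type) {
    unresolved_version_[type] = next_version_++;
  }

 private:
  uint16_t next_version_ = 1u;
  std::map<std::tuple<uint16_t, uint16_t, uint16_t>, uint16_t> versions_;
  std::map<uint16_t, uint16_t> unresolved_version_;
};
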
diff --git a/compiler/dex/local_value_numbering_test.cc b/compiler/dex/local_value_numbering_test.cc
index 6ab6c51a1f..4599612db6 100644
--- a/compiler/dex/local_value_numbering_test.cc
+++ b/compiler/dex/local_value_numbering_test.cc
@@ -44,7 +44,7 @@ class LocalValueNumberingTest : public testing::Test {
Instruction::Code opcode;
int64_t value;
- uint32_t field_annotation;
+ uint32_t field_info;
size_t num_uses;
int32_t uses[kMaxSsaUses];
size_t num_defs;
@@ -55,28 +55,41 @@ class LocalValueNumberingTest : public testing::Test {
{ opcode, value, 0u, 0, { }, 1, { reg } }
#define DEF_CONST_WIDE(opcode, reg, value) \
{ opcode, value, 0u, 0, { }, 2, { reg, reg + 1 } }
-#define DEF_IGET(opcode, reg, obj, field_annotation) \
- { opcode, 0u, field_annotation, 1, { obj }, 1, { reg } }
-#define DEF_IGET_WIDE(opcode, reg, obj, field_annotation) \
- { opcode, 0u, field_annotation, 1, { obj }, 2, { reg, reg + 1 } }
-#define DEF_IPUT(opcode, reg, obj, field_annotation) \
- { opcode, 0u, field_annotation, 2, { reg, obj }, 0, { } }
-#define DEF_IPUT_WIDE(opcode, reg, obj, field_annotation) \
- { opcode, 0u, field_annotation, 3, { reg, reg + 1, obj }, 0, { } }
-#define DEF_SGET(opcode, reg, field_annotation) \
- { opcode, 0u, field_annotation, 0, { }, 1, { reg } }
-#define DEF_SGET_WIDE(opcode, reg, field_annotation) \
- { opcode, 0u, field_annotation, 0, { }, 2, { reg, reg + 1 } }
-#define DEF_SPUT(opcode, reg, field_annotation) \
- { opcode, 0u, field_annotation, 1, { reg }, 0, { } }
-#define DEF_SPUT_WIDE(opcode, reg, field_annotation) \
- { opcode, 0u, field_annotation, 2, { reg, reg + 1 }, 0, { } }
+#define DEF_IGET(opcode, reg, obj, field_info) \
+ { opcode, 0u, field_info, 1, { obj }, 1, { reg } }
+#define DEF_IGET_WIDE(opcode, reg, obj, field_info) \
+ { opcode, 0u, field_info, 1, { obj }, 2, { reg, reg + 1 } }
+#define DEF_IPUT(opcode, reg, obj, field_info) \
+ { opcode, 0u, field_info, 2, { reg, obj }, 0, { } }
+#define DEF_IPUT_WIDE(opcode, reg, obj, field_info) \
+ { opcode, 0u, field_info, 3, { reg, reg + 1, obj }, 0, { } }
+#define DEF_SGET(opcode, reg, field_info) \
+ { opcode, 0u, field_info, 0, { }, 1, { reg } }
+#define DEF_SGET_WIDE(opcode, reg, field_info) \
+ { opcode, 0u, field_info, 0, { }, 2, { reg, reg + 1 } }
+#define DEF_SPUT(opcode, reg, field_info) \
+ { opcode, 0u, field_info, 1, { reg }, 0, { } }
+#define DEF_SPUT_WIDE(opcode, reg, field_info) \
+ { opcode, 0u, field_info, 2, { reg, reg + 1 }, 0, { } }
#define DEF_INVOKE1(opcode, reg) \
{ opcode, 0u, 0u, 1, { reg }, 0, { } }
#define DEF_UNIQUE_REF(opcode, reg) \
{ opcode, 0u, 0u, 0, { }, 1, { reg } } // CONST_CLASS, CONST_STRING, NEW_ARRAY, ...
void DoPrepareIFields(const IFieldDef* defs, size_t count) {
+ cu_.mir_graph->ifield_lowering_infos_.Reset();
+ cu_.mir_graph->ifield_lowering_infos_.Resize(count);
+ for (size_t i = 0u; i != count; ++i) {
+ const IFieldDef* def = &defs[i];
+ MirIFieldLoweringInfo field_info(def->field_idx);
+ if (def->declaring_dex_file != 0u) {
+ field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
+ field_info.declaring_field_idx_ = def->declaring_field_idx;
+ field_info.flags_ = 0u | // Without kFlagIsStatic.
+ (def->is_volatile ? MirIFieldLoweringInfo::kFlagIsVolatile : 0u);
+ }
+ cu_.mir_graph->ifield_lowering_infos_.Insert(field_info);
+ }
}
template <size_t count>
@@ -85,6 +98,19 @@ class LocalValueNumberingTest : public testing::Test {
}
void DoPrepareSFields(const SFieldDef* defs, size_t count) {
+ cu_.mir_graph->sfield_lowering_infos_.Reset();
+ cu_.mir_graph->sfield_lowering_infos_.Resize(count);
+ for (size_t i = 0u; i != count; ++i) {
+ const SFieldDef* def = &defs[i];
+ MirSFieldLoweringInfo field_info(def->field_idx);
+ if (def->declaring_dex_file != 0u) {
+ field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
+ field_info.declaring_field_idx_ = def->declaring_field_idx;
+ field_info.flags_ = MirSFieldLoweringInfo::kFlagIsStatic |
+ (def->is_volatile ? MirSFieldLoweringInfo::kFlagIsVolatile : 0u);
+ }
+ cu_.mir_graph->sfield_lowering_infos_.Insert(field_info);
+ }
}
template <size_t count>
@@ -102,6 +128,13 @@ class LocalValueNumberingTest : public testing::Test {
mir->dalvikInsn.opcode = def->opcode;
mir->dalvikInsn.vB = static_cast<int32_t>(def->value);
mir->dalvikInsn.vB_wide = def->value;
+ if (def->opcode >= Instruction::IGET && def->opcode <= Instruction::IPUT_SHORT) {
+ ASSERT_LT(def->field_info, cu_.mir_graph->ifield_lowering_infos_.Size());
+ mir->meta.ifield_lowering_info = def->field_info;
+ } else if (def->opcode >= Instruction::SGET && def->opcode <= Instruction::SPUT_SHORT) {
+ ASSERT_LT(def->field_info, cu_.mir_graph->sfield_lowering_infos_.Size());
+ mir->meta.sfield_lowering_info = def->field_info;
+ }
mir->ssa_rep = &ssa_reps_[i];
mir->ssa_rep->num_uses = def->num_uses;
mir->ssa_rep->uses = const_cast<int32_t*>(def->uses); // Not modified by LVN.
@@ -146,7 +179,6 @@ class LocalValueNumberingTest : public testing::Test {
LocalValueNumbering lvn_;
};
-#if 0 // TODO: re-enable when LVN is handling memory igets.
TEST_F(LocalValueNumberingTest, TestIGetIGetInvokeIGet) {
static const IFieldDef ifields[] = {
{ 1u, 1u, 1u, false }
@@ -169,7 +201,6 @@ TEST_F(LocalValueNumberingTest, TestIGetIGetInvokeIGet) {
EXPECT_EQ(mirs_[2].optimization_flags, 0u);
EXPECT_EQ(mirs_[3].optimization_flags, MIR_IGNORE_NULL_CHECK);
}
-#endif
TEST_F(LocalValueNumberingTest, TestIGetIPutIGetIGetIGet) {
static const IFieldDef ifields[] = {
@@ -197,7 +228,6 @@ TEST_F(LocalValueNumberingTest, TestIGetIPutIGetIGetIGet) {
EXPECT_EQ(mirs_[4].optimization_flags, 0u);
}
-#if 0 // TODO: re-enable when LVN is handling memory igets.
TEST_F(LocalValueNumberingTest, TestUniquePreserve1) {
static const IFieldDef ifields[] = {
{ 1u, 1u, 1u, false },
@@ -218,9 +248,7 @@ TEST_F(LocalValueNumberingTest, TestUniquePreserve1) {
EXPECT_EQ(mirs_[2].optimization_flags, 0u);
EXPECT_EQ(mirs_[3].optimization_flags, MIR_IGNORE_NULL_CHECK);
}
-#endif
-#if 0 // TODO: re-enable when LVN is handling memory igets.
TEST_F(LocalValueNumberingTest, TestUniquePreserve2) {
static const IFieldDef ifields[] = {
{ 1u, 1u, 1u, false },
@@ -241,9 +269,7 @@ TEST_F(LocalValueNumberingTest, TestUniquePreserve2) {
EXPECT_EQ(mirs_[2].optimization_flags, MIR_IGNORE_NULL_CHECK);
EXPECT_EQ(mirs_[3].optimization_flags, MIR_IGNORE_NULL_CHECK);
}
-#endif
-#if 0 // TODO: re-enable when LVN is handling memory igets.
TEST_F(LocalValueNumberingTest, TestUniquePreserveAndEscape) {
static const IFieldDef ifields[] = {
{ 1u, 1u, 1u, false },
@@ -267,7 +293,6 @@ TEST_F(LocalValueNumberingTest, TestUniquePreserveAndEscape) {
EXPECT_EQ(mirs_[3].optimization_flags, MIR_IGNORE_NULL_CHECK);
EXPECT_EQ(mirs_[5].optimization_flags, MIR_IGNORE_NULL_CHECK);
}
-#endif
TEST_F(LocalValueNumberingTest, TestVolatile) {
static const IFieldDef ifields[] = {
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index 8ef80fa6bb..d159f49b3e 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -14,11 +14,15 @@
* limitations under the License.
*/
+#include <algorithm>
#include "compiler_internals.h"
#include "dataflow_iterator-inl.h"
+#include "dex_instruction.h"
+#include "dex_instruction-inl.h"
#include "dex/quick/dex_file_method_inliner.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "driver/compiler_options.h"
+#include "UniquePtr.h"
namespace art {
@@ -1090,4 +1094,109 @@ bool MIRGraph::SkipCompilation() {
return ComputeSkipCompilation(&stats, skip_compilation);
}
+void MIRGraph::DoCacheFieldLoweringInfo() {
+ // Try to use stack-allocated array, resort to heap if we exceed the initial size.
+ static constexpr size_t kInitialSize = 32;
+ uint16_t stack_idxs[kInitialSize];
+ UniquePtr<uint16_t[]> allocated_idxs;
+ uint16_t* field_idxs = stack_idxs;
+ size_t size = kInitialSize;
+
+ // Find IGET/IPUT/SGET/SPUT insns, store IGET/IPUT fields at the beginning, SGET/SPUT at the end.
+ size_t ifield_pos = 0u;
+ size_t sfield_pos = size;
+ AllNodesIterator iter(this);
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
+ if (bb->block_type != kDalvikByteCode) {
+ continue;
+ }
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
+ if (mir->dalvikInsn.opcode >= Instruction::IGET &&
+ mir->dalvikInsn.opcode <= Instruction::SPUT_SHORT) {
+ bool need_alloc = false;
+ const Instruction* insn = Instruction::At(current_code_item_->insns_ + mir->offset);
+ uint16_t field_idx;
+ // Get field index and try to find it among existing indexes. If found, it's usually among
+ // the last few added, so we'll start the search from ifield_pos/sfield_pos. Though this
+ // is a linear search, it actually performs much better than map based approach.
+ if (mir->dalvikInsn.opcode <= Instruction::IPUT_SHORT) {
+ field_idx = insn->VRegC_22c();
+ size_t i = ifield_pos;
+ while (i != 0u && field_idxs[i - 1] != field_idx) {
+ --i;
+ }
+ if (i != 0u) {
+ mir->meta.ifield_lowering_info = i - 1;
+ } else {
+ mir->meta.ifield_lowering_info = ifield_pos;
+ if (UNLIKELY(ifield_pos == sfield_pos)) {
+ need_alloc = true;
+ } else {
+ field_idxs[ifield_pos++] = field_idx;
+ }
+ }
+ } else {
+ field_idx = insn->VRegB_21c();
+ size_t i = sfield_pos;
+ while (i != size && field_idxs[i] != field_idx) {
+ ++i;
+ }
+ if (i != size) {
+ mir->meta.sfield_lowering_info = size - i - 1u;
+ } else {
+ mir->meta.sfield_lowering_info = size - sfield_pos;
+ if (UNLIKELY(ifield_pos == sfield_pos)) {
+ need_alloc = true;
+ } else {
+ field_idxs[--sfield_pos] = field_idx;
+ }
+ }
+ }
+ if (UNLIKELY(need_alloc)) {
+ DCHECK(field_idxs == stack_idxs);
+ // All IGET/IPUT/SGET/SPUT instructions take 2 code units and there must also be a RETURN.
+ uint32_t max_refs = (current_code_item_->insns_size_in_code_units_ - 1u) / 2u;
+ allocated_idxs.reset(new uint16_t[max_refs]);
+ field_idxs = allocated_idxs.get();
+ size_t sfield_count = size - sfield_pos;
+ sfield_pos = max_refs - sfield_count;
+ size = max_refs;
+ memcpy(field_idxs, stack_idxs, ifield_pos * sizeof(field_idxs[0]));
+ memcpy(field_idxs + sfield_pos, stack_idxs + ifield_pos,
+ sfield_count * sizeof(field_idxs[0]));
+ if (mir->dalvikInsn.opcode <= Instruction::IPUT_SHORT) {
+ field_idxs[ifield_pos++] = field_idx;
+ } else {
+ field_idxs[--sfield_pos] = field_idx;
+ }
+ }
+ DCHECK_LE(ifield_pos, sfield_pos);
+ }
+ }
+ }
+
+ if (ifield_pos != 0u) {
+ // Resolve instance field infos.
+ DCHECK_EQ(ifield_lowering_infos_.Size(), 0u);
+ ifield_lowering_infos_.Resize(ifield_pos);
+ for (size_t pos = 0u; pos != ifield_pos; ++pos) {
+ ifield_lowering_infos_.Insert(MirIFieldLoweringInfo(field_idxs[pos]));
+ }
+ MirIFieldLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
+ ifield_lowering_infos_.GetRawStorage(), ifield_pos);
+ }
+
+ if (sfield_pos != size) {
+ // Resolve static field infos.
+ DCHECK_EQ(sfield_lowering_infos_.Size(), 0u);
+ sfield_lowering_infos_.Resize(size - sfield_pos);
+ for (size_t pos = size; pos != sfield_pos;) {
+ --pos;
+ sfield_lowering_infos_.Insert(MirSFieldLoweringInfo(field_idxs[pos]));
+ }
+ MirSFieldLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
+ sfield_lowering_infos_.GetRawStorage(), size - sfield_pos);
+ }
+}
+
} // namespace art
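
DoCacheFieldLoweringInfo() above interns field indexes in one buffer, instance fields growing from the front and static fields from the back, scanning linearly from the most recently added entry; as the in-code comment notes, this beats a map for the small counts involved. The core of that two-ended buffer, reduced to a sketch (generic names, fixed capacity; the real code falls back to a heap array when full):

#include <cassert>
#include <cstddef>
#include <cstdint>

class TwoEndedIndexCache {
 public:
  // Returns the lowering-info index for an instance field, interning
  // the field index at the front end if it has not been seen yet.
  size_t InternInstanceField(uint16_t field_idx) {
    for (size_t i = front_; i != 0u; --i) {  // Recent entries first.
      if (idxs_[i - 1] == field_idx) {
        return i - 1;
      }
    }
    assert(!Full());  // The patch reallocates instead of asserting.
    idxs_[front_] = field_idx;
    return front_++;
  }

  // Static fields grow from the back end of the same buffer.
  size_t InternStaticField(uint16_t field_idx) {
    for (size_t i = back_; i != kSize; ++i) {
      if (idxs_[i] == field_idx) {
        return kSize - i - 1u;
      }
    }
    assert(!Full());
    idxs_[--back_] = field_idx;
    return kSize - back_ - 1u;
  }

  bool Full() const { return front_ == back_; }

 private:
  static constexpr size_t kSize = 32;  // Mirrors kInitialSize above.
  uint16_t idxs_[kSize];
  size_t front_ = 0u;
  size_t back_ = kSize;
};
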
diff --git a/compiler/dex/mir_field_info.cc b/compiler/dex/mir_field_info.cc
new file mode 100644
index 0000000000..96eda01d1e
--- /dev/null
+++ b/compiler/dex/mir_field_info.cc
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "mir_field_info.h"
+
+#include <string.h>
+
+#include "base/logging.h"
+#include "driver/compiler_driver.h"
+#include "driver/compiler_driver-inl.h"
+#include "mirror/class_loader.h" // Only to allow casts in SirtRef<ClassLoader>.
+#include "mirror/dex_cache.h" // Only to allow casts in SirtRef<DexCache>.
+#include "scoped_thread_state_change.h"
+#include "sirt_ref.h"
+
+namespace art {
+
+void MirIFieldLoweringInfo::Resolve(CompilerDriver* compiler_driver,
+ const DexCompilationUnit* mUnit,
+ MirIFieldLoweringInfo* field_infos, size_t count) {
+ if (kIsDebugBuild) {
+ DCHECK(field_infos != nullptr);
+ DCHECK_NE(count, 0u);
+ for (auto it = field_infos, end = field_infos + count; it != end; ++it) {
+ MirIFieldLoweringInfo unresolved(it->field_idx_);
+ DCHECK_EQ(memcmp(&unresolved, &*it, sizeof(*it)), 0);
+ }
+ }
+
+ // We're going to resolve fields and check access in a tight loop. It's better to hold
+ // the lock and needed references once than re-acquiring them again and again.
+ ScopedObjectAccess soa(Thread::Current());
+ SirtRef<mirror::DexCache> dex_cache(soa.Self(), compiler_driver->GetDexCache(mUnit));
+ SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
+ compiler_driver->GetClassLoader(soa, mUnit));
+ SirtRef<mirror::Class> referrer_class(soa.Self(),
+ compiler_driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit));
+ // Even if the referrer class is unresolved (i.e. we're compiling a method without class
+ // definition) we still want to resolve fields and record all available info.
+
+ for (auto it = field_infos, end = field_infos + count; it != end; ++it) {
+ uint32_t field_idx = it->field_idx_;
+ mirror::ArtField* resolved_field =
+ compiler_driver->ResolveField(soa, dex_cache, class_loader, mUnit, field_idx, false);
+ if (UNLIKELY(resolved_field == nullptr)) {
+ continue;
+ }
+ compiler_driver->GetResolvedFieldDexFileLocation(resolved_field,
+ &it->declaring_dex_file_, &it->declaring_class_idx_, &it->declaring_field_idx_);
+ bool is_volatile = compiler_driver->IsFieldVolatile(resolved_field);
+
+ std::pair<bool, bool> fast_path = compiler_driver->IsFastInstanceField(
+ dex_cache.get(), referrer_class.get(), resolved_field, field_idx, &it->field_offset_);
+ it->flags_ = 0u | // Without kFlagIsStatic.
+ (is_volatile ? kFlagIsVolatile : 0u) |
+ (fast_path.first ? kFlagFastGet : 0u) |
+ (fast_path.second ? kFlagFastPut : 0u);
+ }
+}
+
+void MirSFieldLoweringInfo::Resolve(CompilerDriver* compiler_driver,
+ const DexCompilationUnit* mUnit,
+ MirSFieldLoweringInfo* field_infos, size_t count) {
+ if (kIsDebugBuild) {
+ DCHECK(field_infos != nullptr);
+ DCHECK_NE(count, 0u);
+ for (auto it = field_infos, end = field_infos + count; it != end; ++it) {
+ MirSFieldLoweringInfo unresolved(it->field_idx_);
+ // In 64-bit builds, there's padding after storage_index_, don't include it in memcmp.
+ size_t size = OFFSETOF_MEMBER(MirSFieldLoweringInfo, storage_index_) +
+ sizeof(it->storage_index_);
+ DCHECK_EQ(memcmp(&unresolved, &*it, size), 0);
+ }
+ }
+
+ // We're going to resolve fields and check access in a tight loop. It's better to hold
+ // the lock and needed references once than re-acquiring them again and again.
+ ScopedObjectAccess soa(Thread::Current());
+ SirtRef<mirror::DexCache> dex_cache(soa.Self(), compiler_driver->GetDexCache(mUnit));
+ SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
+ compiler_driver->GetClassLoader(soa, mUnit));
+ SirtRef<mirror::Class> referrer_class(soa.Self(),
+ compiler_driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit));
+ // Even if the referrer class is unresolved (i.e. we're compiling a method without class
+ // definition) we still want to resolve fields and record all available info.
+
+ for (auto it = field_infos, end = field_infos + count; it != end; ++it) {
+ uint32_t field_idx = it->field_idx_;
+ mirror::ArtField* resolved_field =
+ compiler_driver->ResolveField(soa, dex_cache, class_loader, mUnit, field_idx, true);
+ if (UNLIKELY(resolved_field == nullptr)) {
+ continue;
+ }
+ compiler_driver->GetResolvedFieldDexFileLocation(resolved_field,
+ &it->declaring_dex_file_, &it->declaring_class_idx_, &it->declaring_field_idx_);
+ bool is_volatile = compiler_driver->IsFieldVolatile(resolved_field) ? 1u : 0u;
+
+ bool is_referrers_class, is_initialized;
+ std::pair<bool, bool> fast_path = compiler_driver->IsFastStaticField(
+ dex_cache.get(), referrer_class.get(), resolved_field, field_idx, &it->field_offset_,
+ &it->storage_index_, &is_referrers_class, &is_initialized);
+ it->flags_ = kFlagIsStatic |
+ (is_volatile ? kFlagIsVolatile : 0u) |
+ (fast_path.first ? kFlagFastGet : 0u) |
+ (fast_path.second ? kFlagFastPut : 0u) |
+ (is_referrers_class ? kFlagIsReferrersClass : 0u) |
+ (is_initialized ? kFlagIsInitialized : 0u);
+ }
+}
+
+} // namespace art
diff --git a/compiler/dex/mir_field_info.h b/compiler/dex/mir_field_info.h
new file mode 100644
index 0000000000..41cb4cee14
--- /dev/null
+++ b/compiler/dex/mir_field_info.h
@@ -0,0 +1,211 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_MIR_FIELD_INFO_H_
+#define ART_COMPILER_DEX_MIR_FIELD_INFO_H_
+
+#include "base/macros.h"
+#include "dex_file.h"
+#include "offsets.h"
+
+namespace art {
+
+class CompilerDriver;
+class DexCompilationUnit;
+
+/*
+ * Field info is calculated from the perspective of the compilation unit that accesses
+ * the field and stored in that unit's MIRGraph. Therefore it does not need to reference the
+ * dex file or method for which it has been calculated. However, we do store the declaring
+ * field index, class index and dex file of the resolved field to help distinguish between fields.
+ */
+
+class MirFieldInfo {
+ public:
+ uint16_t FieldIndex() const {
+ return field_idx_;
+ }
+
+ bool IsStatic() const {
+ return (flags_ & kFlagIsStatic) != 0u;
+ }
+
+ bool IsResolved() const {
+ return declaring_dex_file_ != nullptr;
+ }
+
+ const DexFile* DeclaringDexFile() const {
+ return declaring_dex_file_;
+ }
+
+ uint16_t DeclaringClassIndex() const {
+ return declaring_class_idx_;
+ }
+
+ uint16_t DeclaringFieldIndex() const {
+ return declaring_field_idx_;
+ }
+
+ bool IsVolatile() const {
+ return (flags_ & kFlagIsVolatile) != 0u;
+ }
+
+ protected:
+ enum {
+ kBitIsStatic = 0,
+ kBitIsVolatile,
+ kFieldInfoBitEnd
+ };
+ static constexpr uint16_t kFlagIsVolatile = 1u << kBitIsVolatile;
+ static constexpr uint16_t kFlagIsStatic = 1u << kBitIsStatic;
+
+ MirFieldInfo(uint16_t field_idx, uint16_t flags)
+ : field_idx_(field_idx),
+ flags_(flags),
+ declaring_field_idx_(0u),
+ declaring_class_idx_(0u),
+ declaring_dex_file_(nullptr) {
+ }
+
+ // Make copy-ctor/assign/dtor protected to avoid slicing.
+ MirFieldInfo(const MirFieldInfo& other) = default;
+ MirFieldInfo& operator=(const MirFieldInfo& other) = default;
+ ~MirFieldInfo() = default;
+
+ // The field index in the compiling method's dex file.
+ uint16_t field_idx_;
+ // Flags, for volatility and derived class data.
+ uint16_t flags_;
+ // The field index in the dex file that defines field, 0 if unresolved.
+ uint16_t declaring_field_idx_;
+ // The type index of the class declaring the field, 0 if unresolved.
+ uint16_t declaring_class_idx_;
+ // The dex file that defines the class containing the field and the field, nullptr if unresolved.
+ const DexFile* declaring_dex_file_;
+};
+
+class MirIFieldLoweringInfo : public MirFieldInfo {
+ public:
+ // For each requested instance field retrieve the field's declaring location (dex file, class
+ // index and field index) and volatility and compute the whether we can fast path the access
+ // with IGET/IPUT. For fast path fields, retrieve the field offset.
+ static void Resolve(CompilerDriver* compiler_driver, const DexCompilationUnit* mUnit,
+ MirIFieldLoweringInfo* field_infos, size_t count)
+ LOCKS_EXCLUDED(Locks::mutator_lock_);
+
+ // Construct an unresolved instance field lowering info.
+ explicit MirIFieldLoweringInfo(uint16_t field_idx)
+ : MirFieldInfo(field_idx, kFlagIsVolatile), // Without kFlagIsStatic.
+ field_offset_(0u) {
+ }
+
+ bool FastGet() const {
+ return (flags_ & kFlagFastGet) != 0u;
+ }
+
+ bool FastPut() const {
+ return (flags_ & kFlagFastPut) != 0u;
+ }
+
+ MemberOffset FieldOffset() const {
+ return field_offset_;
+ }
+
+ private:
+ enum {
+ kBitFastGet = kFieldInfoBitEnd,
+ kBitFastPut,
+ kIFieldLoweringInfoBitEnd
+ };
+ COMPILE_ASSERT(kIFieldLoweringInfoBitEnd <= 16, too_many_flags);
+ static constexpr uint16_t kFlagFastGet = 1u << kBitFastGet;
+ static constexpr uint16_t kFlagFastPut = 1u << kBitFastPut;
+
+ // The member offset of the field, 0u if unresolved.
+ MemberOffset field_offset_;
+
+ friend class LocalValueNumberingTest;
+};
+
+class MirSFieldLoweringInfo : public MirFieldInfo {
+ public:
+ // For each requested static field retrieve the field's declaring location (dex file, class
+ // index and field index) and volatility and compute the whether we can fast path the access with
+ // IGET/IPUT. For fast path fields (at least for IGET), retrieve the information needed for
+ // the field access, i.e. the field offset, whether the field is in the same class as the
+ // method being compiled, whether the declaring class can be safely assumed to be initialized
+ // and the type index of the declaring class in the compiled method's dex file.
+ static void Resolve(CompilerDriver* compiler_driver, const DexCompilationUnit* mUnit,
+ MirSFieldLoweringInfo* field_infos, size_t count)
+ LOCKS_EXCLUDED(Locks::mutator_lock_);
+
+ // Construct an unresolved static field lowering info.
+ explicit MirSFieldLoweringInfo(uint16_t field_idx)
+ : MirFieldInfo(field_idx, kFlagIsVolatile | kFlagIsStatic),
+ field_offset_(0u),
+ storage_index_(DexFile::kDexNoIndex) {
+ }
+
+ bool FastGet() const {
+ return (flags_ & kFlagFastGet) != 0u;
+ }
+
+ bool FastPut() const {
+ return (flags_ & kFlagFastPut) != 0u;
+ }
+
+ bool IsReferrersClass() const {
+ return (flags_ & kFlagIsReferrersClass) != 0u;
+ }
+
+ bool IsInitialized() const {
+ return (flags_ & kFlagIsInitialized) != 0u;
+ }
+
+ MemberOffset FieldOffset() const {
+ return field_offset_;
+ }
+
+ uint32_t StorageIndex() const {
+ return storage_index_;
+ }
+
+ private:
+ enum {
+ kBitFastGet = kFieldInfoBitEnd,
+ kBitFastPut,
+ kBitIsReferrersClass,
+ kBitIsInitialized,
+ kSFieldLoweringInfoBitEnd
+ };
+ COMPILE_ASSERT(kSFieldLoweringInfoBitEnd <= 16, too_many_flags);
+ static constexpr uint16_t kFlagFastGet = 1u << kBitFastGet;
+ static constexpr uint16_t kFlagFastPut = 1u << kBitFastPut;
+ static constexpr uint16_t kFlagIsReferrersClass = 1u << kBitIsReferrersClass;
+ static constexpr uint16_t kFlagIsInitialized = 1u << kBitIsInitialized;
+
+ // The member offset of the field, 0u if unresolved.
+ MemberOffset field_offset_;
+ // The type index of the declaring class in the compiling method's dex file,
+ // -1 if the field is unresolved or there's no appropriate TypeId in that dex file.
+ uint32_t storage_index_;
+
+ friend class LocalValueNumberingTest;
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_DEX_MIR_FIELD_INFO_H_
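
The header above packs all field properties into a 16-bit flags_ word: the base class claims the low bits and publishes the first free position, and each subclass continues numbering from there under a compile-time bound. A minimal sketch of that pattern (generic names; ART spells the check COMPILE_ASSERT where this sketch uses static_assert):

#include <cstdint>

class BaseInfo {
 protected:
  enum {
    kBitIsStatic = 0,
    kBitIsVolatile,
    kBaseBitEnd  // First bit position available to subclasses.
  };
  static constexpr uint16_t kFlagIsStatic = 1u << kBitIsStatic;
  static constexpr uint16_t kFlagIsVolatile = 1u << kBitIsVolatile;
  uint16_t flags_ = 0u;
};

class FieldInfo : public BaseInfo {
 public:
  bool FastGet() const { return (flags_ & kFlagFastGet) != 0u; }
  bool FastPut() const { return (flags_ & kFlagFastPut) != 0u; }

 private:
  enum {
    kBitFastGet = kBaseBitEnd,  // Continue where the base class stopped.
    kBitFastPut,
    kBitEnd
  };
  static_assert(kBitEnd <= 16, "too many flags for a uint16_t");
  static constexpr uint16_t kFlagFastGet = 1u << kBitFastGet;
  static constexpr uint16_t kFlagFastPut = 1u << kBitFastPut;
};
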
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 2bfc15459a..46e854fb2b 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -87,7 +87,9 @@ MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
compiler_temps_(arena, 6, kGrowableArrayMisc),
num_non_special_compiler_temps_(0),
max_available_non_special_compiler_temps_(0),
- punt_to_interpreter_(false) {
+ punt_to_interpreter_(false),
+ ifield_lowering_infos_(arena, 0u),
+ sfield_lowering_infos_(arena, 0u) {
try_block_addr_ = new (arena_) ArenaBitVector(arena_, 0, true /* expandable */);
max_available_special_compiler_temps_ = std::abs(static_cast<int>(kVRegNonSpecialTempBaseReg))
- std::abs(static_cast<int>(kVRegTempBaseReg));
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 28e94709e8..08bf647c95 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -20,8 +20,11 @@
#include "dex_file.h"
#include "dex_instruction.h"
#include "compiler_ir.h"
+#include "mir_field_info.h"
+#include "invoke_type.h"
#include "utils/arena_bit_vector.h"
#include "utils/growable_array.h"
+#include "reg_storage.h"
namespace art {
@@ -165,7 +168,7 @@ enum OatMethodAttributes {
#define INVALID_SREG (-1)
#define INVALID_VREG (0xFFFFU)
-#define INVALID_REG (0xFF)
+#define INVALID_REG (0x7F)
#define INVALID_OFFSET (0xDEADF00FU)
#define MIR_IGNORE_NULL_CHECK (1 << kMIRIgnoreNullCheck)
@@ -258,6 +261,12 @@ struct MIR {
MIR* throw_insn;
// Fused cmp branch condition.
ConditionCode ccode;
+ // IGET/IPUT lowering info index, points to MIRGraph::ifield_lowering_infos_. Due to the limit
+ // on the number of code units in a method (64K) and the size of an IGET/IPUT insn (2), this
+ // will never exceed 32K.
+ uint32_t ifield_lowering_info;
+ // SGET/SPUT lowering info index, points to MIRGraph::sfield_lowering_infos_. Due to the limit
+ // on the number of code units in a method (64K) and the size of an SGET/SPUT insn (2), this
+ // will never exceed 32K.
+ uint32_t sfield_lowering_info;
} meta;
};
@@ -328,9 +337,8 @@ struct RegLocation {
unsigned ref:1; // Something GC cares about.
unsigned high_word:1; // High word of pair?
unsigned home:1; // Does this represent the home location?
- VectorLengthType vec_len:3; // Is this value in a vector register, and how big is it?
- uint8_t low_reg; // First physical register.
- uint8_t high_reg; // 2nd physical register (if wide).
+ VectorLengthType vec_len:3; // TODO: remove. Is this value in a vector register, and how big is it?
+ RegStorage reg; // Encoded physical registers.
int16_t s_reg_low; // SSA name for low Dalvik word.
int16_t orig_sreg; // TODO: remove after Bitcode gen complete
// and consolidate usage w/ s_reg_low.
@@ -361,7 +369,7 @@ struct CallInfo {
const RegLocation bad_loc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0, kVectorNotUsed,
- INVALID_REG, INVALID_REG, INVALID_SREG, INVALID_SREG};
+ RegStorage(RegStorage::kInvalid), INVALID_SREG, INVALID_SREG};
class MIRGraph {
public:
@@ -466,6 +474,18 @@ class MIRGraph {
*/
void DumpCFG(const char* dir_prefix, bool all_blocks, const char* suffix = nullptr);
+ void DoCacheFieldLoweringInfo();
+
+ const MirIFieldLoweringInfo& GetIFieldLoweringInfo(MIR* mir) {
+ DCHECK_LT(mir->meta.ifield_lowering_info, ifield_lowering_infos_.Size());
+ return ifield_lowering_infos_.GetRawStorage()[mir->meta.ifield_lowering_info];
+ }
+
+ const MirSFieldLoweringInfo& GetSFieldLoweringInfo(MIR* mir) {
+ DCHECK_LT(mir->meta.sfield_lowering_info, sfield_lowering_infos_.Size());
+ return sfield_lowering_infos_.GetRawStorage()[mir->meta.sfield_lowering_info];
+ }
+
void InitRegLocations();
void RemapRegLocations();
@@ -923,6 +943,8 @@ class MIRGraph {
size_t max_available_non_special_compiler_temps_;
size_t max_available_special_compiler_temps_;
bool punt_to_interpreter_; // Difficult or not worthwhile - just interpret.
+ GrowableArray<MirIFieldLoweringInfo> ifield_lowering_infos_;
+ GrowableArray<MirSFieldLoweringInfo> sfield_lowering_infos_;
friend class LocalValueNumberingTest;
};
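
GetIFieldLoweringInfo and GetSFieldLoweringInfo above are plain array lookups: DoCacheFieldLoweringInfo resolves every field once up front, and each IGET/IPUT or SGET/SPUT MIR carries only an index into the resulting arrays, so code generation never re-enters the resolver (or takes its locks). A minimal standalone model of that two-phase scheme, with hypothetical names:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct FieldLoweringInfo {
      uint16_t field_idx;
      uint16_t flags;
    };

    struct Insn {
      uint32_t lowering_info;  // index into the per-method cache, like MIR::meta
    };

    class MethodGraphModel {
     public:
      // Gather phase: resolve once, record the index on the instruction.
      void Cache(Insn* insn, FieldLoweringInfo info) {
        insn->lowering_info = static_cast<uint32_t>(infos_.size());
        infos_.push_back(info);
      }
      // Codegen phase: O(1) lookup, no resolver calls.
      const FieldLoweringInfo& Get(const Insn& insn) const {
        assert(insn.lowering_info < infos_.size());
        return infos_[insn.lowering_info];
      }
     private:
      std::vector<FieldLoweringInfo> infos_;
    };
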
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 67d476929e..14d03a518c 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -207,10 +207,11 @@ size_t MIRGraph::GetNumAvailableNonSpecialCompilerTemps() {
}
}
+
+// FIXME: will probably need to revisit all uses of this, as the type is not defined.
static const RegLocation temp_loc = {kLocCompilerTemp,
- 0, 1 /*defined*/, 0, 0, 0, 0, 0, 1 /*home*/,
- kVectorNotUsed, INVALID_REG, INVALID_REG, INVALID_SREG,
- INVALID_SREG};
+ 0, 1 /*defined*/, 0, 0, 0, 0, 0, 1 /*home*/, kVectorNotUsed,
+ RegStorage(), INVALID_SREG, INVALID_SREG};
CompilerTemp* MIRGraph::GetNewCompilerTemp(CompilerTempType ct_type, bool wide) {
// There is a limit to the number of non-special temps so check to make sure it wasn't exceeded.
diff --git a/compiler/dex/pass.h b/compiler/dex/pass.h
index 255892e324..9457d5be76 100644
--- a/compiler/dex/pass.h
+++ b/compiler/dex/pass.h
@@ -22,8 +22,8 @@
namespace art {
// Forward declarations.
-class BasicBlock;
-class CompilationUnit;
+struct BasicBlock;
+struct CompilationUnit;
class Pass;
/**
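
The class-to-struct change is about tag consistency: BasicBlock and CompilationUnit are defined with the struct keyword elsewhere, and while C++ allows a forward declaration to use the other tag, Clang warns under -Wmismatched-tags. A two-line illustration:

    class BasicBlock;               // forward declaration with 'class'
    struct BasicBlock { int id; };  // definition with 'struct': legal C++,
                                    // but clang -Wmismatched-tags complains
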
diff --git a/compiler/dex/pass_driver.cc b/compiler/dex/pass_driver.cc
index b60f29697b..256bcb1473 100644
--- a/compiler/dex/pass_driver.cc
+++ b/compiler/dex/pass_driver.cc
@@ -91,6 +91,7 @@ void PassDriver::CreatePasses() {
* - This is not yet an issue: no current pass would require it.
*/
static const Pass* const passes[] = {
+ GetPassInstance<CacheFieldLoweringInfo>(),
GetPassInstance<CodeLayout>(),
GetPassInstance<SSATransformation>(),
GetPassInstance<ConstantPropagation>(),
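
CacheFieldLoweringInfo is deliberately placed first so every later pass sees resolved field info. The GetPassInstance<> helper presumably follows the usual function-local-static pattern, one stateless singleton per pass type; a plausible sketch (the exact ART definition may differ):

    template <typename PassType>
    const Pass* GetPassInstance() {
      static const PassType pass;  // constructed on first use, shared thereafter
      return &pass;
    }
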
diff --git a/compiler/dex/portable/mir_to_gbc.cc b/compiler/dex/portable/mir_to_gbc.cc
index 3187fbb28c..70438ecd50 100644
--- a/compiler/dex/portable/mir_to_gbc.cc
+++ b/compiler/dex/portable/mir_to_gbc.cc
@@ -30,6 +30,7 @@
#include "dex/compiler_internals.h"
#include "dex/dataflow_iterator-inl.h"
#include "dex/frontend.h"
+#include "llvm/ir_builder.h"
#include "llvm/llvm_compilation_unit.h"
#include "llvm/utils_llvm.h"
#include "mir_to_gbc.h"
diff --git a/compiler/dex/portable/mir_to_gbc.h b/compiler/dex/portable/mir_to_gbc.h
index 2b681f6097..e97634c519 100644
--- a/compiler/dex/portable/mir_to_gbc.h
+++ b/compiler/dex/portable/mir_to_gbc.h
@@ -17,11 +17,18 @@
#ifndef ART_COMPILER_DEX_PORTABLE_MIR_TO_GBC_H_
#define ART_COMPILER_DEX_PORTABLE_MIR_TO_GBC_H_
+#include <llvm/ADT/ArrayRef.h>
+#include <llvm/IR/BasicBlock.h>
+#include <llvm/IR/IRBuilder.h>
+#include <llvm/IR/LLVMContext.h>
+#include <llvm/IR/Module.h>
+
#include "invoke_type.h"
#include "compiled_method.h"
#include "dex/compiler_enums.h"
#include "dex/compiler_ir.h"
#include "dex/backend.h"
+#include "llvm/intrinsic_helper.h"
#include "llvm/llvm_compilation_unit.h"
#include "safe_map.h"
diff --git a/compiler/dex/quick/arm/arm_lir.h b/compiler/dex/quick/arm/arm_lir.h
index 37b4ec6dc7..9f52f20f35 100644
--- a/compiler/dex/quick/arm/arm_lir.h
+++ b/compiler/dex/quick/arm/arm_lir.h
@@ -117,14 +117,6 @@ namespace art {
// Mask to strip off fp flags.
#define ARM_FP_REG_MASK (ARM_FP_REG_OFFSET-1)
-// RegisterLocation templates return values (r0, or r0/r1).
-#define ARM_LOC_C_RETURN {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, r0, INVALID_REG, \
- INVALID_SREG, INVALID_SREG}
-#define ARM_LOC_C_RETURN_WIDE {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, r0, r1, \
- INVALID_SREG, INVALID_SREG}
-#define ARM_LOC_C_RETURN_FLOAT ARM_LOC_C_RETURN
-#define ARM_LOC_C_RETURN_DOUBLE ARM_LOC_C_RETURN_WIDE
-
enum ArmResourceEncodingPos {
kArmGPReg0 = 0,
kArmRegSP = 13,
@@ -225,6 +217,20 @@ enum ArmNativeRegisterPool {
#define rARM_INVOKE_TGT rARM_LR
#define rARM_COUNT INVALID_REG
+// RegisterLocation templates for return values (r0, or r0/r1).
+const RegLocation arm_loc_c_return
+ {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
+ RegStorage(RegStorage::k32BitSolo, r0), INVALID_SREG, INVALID_SREG};
+const RegLocation arm_loc_c_return_wide
+ {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
+ RegStorage(RegStorage::k64BitPair, r0, r1), INVALID_SREG, INVALID_SREG};
+const RegLocation arm_loc_c_return_float
+ {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
+ RegStorage(RegStorage::k32BitSolo, r0), INVALID_SREG, INVALID_SREG};
+const RegLocation arm_loc_c_return_double
+ {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
+ RegStorage(RegStorage::k64BitPair, r0, r1), INVALID_SREG, INVALID_SREG};
+
enum ArmShiftEncodings {
kArmLsl = 0x0,
kArmLsr = 0x1,
@@ -451,7 +457,6 @@ enum ArmOpcode {
kThumb2MovImm16LST, // Special purpose version for switch table use.
kThumb2MovImm16HST, // Special purpose version for switch table use.
kThumb2LdmiaWB, // ldmia [111010011001[ rn[19..16] mask[15..0].
- kThumb2SubsRRI12, // setflags encoding.
kThumb2OrrRRRs, // orrs [111010100101] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
kThumb2Push1, // t3 encoding of push.
kThumb2Pop1, // t3 encoding of pop.
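
These constants are among the first users of the new RegStorage type from reg_storage.h, which replaces the separate low_reg/high_reg bytes with a single value that knows its own shape: invalid, a 32-bit solo register, or a 64-bit pair. The INVALID_REG change from 0xFF to 0x7F a few hunks earlier suggests register numbers must now fit in 7 bits of the packed encoding, though that is an inference rather than something this diff states. A reduced model of the interface exercised throughout this patch:

    #include <cstdint>

    class RegStorageModel {
     public:
      enum Kind { kInvalid, k32BitSolo, k64BitPair };
      RegStorageModel() : kind_(kInvalid), low_(0), high_(0) {}
      explicit RegStorageModel(Kind kind) : kind_(kind), low_(0), high_(0) {}
      RegStorageModel(Kind kind, int low)
          : kind_(kind), low_(static_cast<int8_t>(low)), high_(0) {}
      RegStorageModel(Kind kind, int low, int high)
          : kind_(kind), low_(static_cast<int8_t>(low)),
            high_(static_cast<int8_t>(high)) {}
      int GetReg() const { return low_; }       // solo reg, or low half of a pair
      int GetHighReg() const { return high_; }  // meaningful only for k64BitPair
      void SetReg(int low) { low_ = static_cast<int8_t>(low); }
      void SetHighReg(int high) { high_ = static_cast<int8_t>(high); }
     private:
      Kind kind_;
      int8_t low_;
      int8_t high_;
    };
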
diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc
index 00939ec98b..2a9b5a50e2 100644
--- a/compiler/dex/quick/arm/assemble_arm.cc
+++ b/compiler/dex/quick/arm/assemble_arm.cc
@@ -995,11 +995,6 @@ const ArmEncodingMap ArmMir2Lir::EncodingMap[kArmLast] = {
kFmtUnused, -1, -1,
IS_BINARY_OP | REG_DEF0_USE0 | REG_DEF_LIST1 | IS_LOAD,
"ldmia", "!0C!!, <!1R>", 4, kFixupNone),
- ENCODING_MAP(kThumb2SubsRRI12, 0xf1b00000,
- kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtImm12, -1, -1,
- kFmtUnused, -1, -1,
- IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
- "subs", "!0C,!1C,#!2d", 4, kFixupNone),
ENCODING_MAP(kThumb2OrrRRRs, 0xea500000,
kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
kFmtShift, -1, -1, IS_QUAD_OP | REG_DEF0_USE12 | SETS_CCODES,
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index b36dde98b2..f426055068 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -79,13 +79,14 @@ void ArmMir2Lir::GenSparseSwitch(MIR* mir, uint32_t table_offset,
LIR* target = NewLIR0(kPseudoTargetLabel);
// Load next key/disp
NewLIR2(kThumb2LdmiaWB, rBase, (1 << r_key) | (1 << r_disp));
- OpRegReg(kOpCmp, r_key, rl_src.low_reg);
+ OpRegReg(kOpCmp, r_key, rl_src.reg.GetReg());
// Go if match. NOTE: No instruction set switch here - must stay Thumb2
OpIT(kCondEq, "");
LIR* switch_branch = NewLIR1(kThumb2AddPCR, r_disp);
tab_rec->anchor = switch_branch;
// Needs to use setflags encoding here
- NewLIR3(kThumb2SubsRRI12, r_idx, r_idx, 1);
+ OpRegRegImm(kOpSub, r_idx, r_idx, 1); // For value == 1, this should set flags.
+ DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
OpCondBranch(kCondNe, target);
}
@@ -115,10 +116,10 @@ void ArmMir2Lir::GenPackedSwitch(MIR* mir, uint32_t table_offset,
int keyReg;
// Remove the bias, if necessary
if (low_key == 0) {
- keyReg = rl_src.low_reg;
+ keyReg = rl_src.reg.GetReg();
} else {
keyReg = AllocTemp();
- OpRegRegImm(kOpSub, keyReg, rl_src.low_reg, low_key);
+ OpRegRegImm(kOpSub, keyReg, rl_src.reg.GetReg(), low_key);
}
// Bounds check - if < 0 or >= size continue following switch
OpRegImm(kOpCmp, keyReg, size-1);
@@ -293,7 +294,7 @@ void ArmMir2Lir::GenMoveException(RegLocation rl_dest) {
int ex_offset = Thread::ExceptionOffset().Int32Value();
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
int reset_reg = AllocTemp();
- LoadWordDisp(rARM_SELF, ex_offset, rl_result.low_reg);
+ LoadWordDisp(rARM_SELF, ex_offset, rl_result.reg.GetReg());
LoadConstant(reset_reg, 0);
StoreWordDisp(rARM_SELF, ex_offset, reset_reg);
FreeTemp(reset_reg);
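
For reference, the sparse-switch loop emitted above walks a table of interleaved (key, displacement) pairs, the same layout InstallSwitchTables produces with back-to-back Push32 calls, decrementing the index with a flag-setting subtract. Its logic in C++ (a model of the emitted Thumb2, not part of the compiler):

    #include <cstdint>

    // table holds 'entries' interleaved (key, displacement) pairs.
    // Returns the displacement on a match, 0 to fall through.
    int32_t SparseSwitchLookup(const int32_t* table, int entries, int32_t key) {
      for (int idx = entries; idx != 0; idx--) {  // "subs r_idx, r_idx, #1; bne"
        int32_t k = *table++;                     // ldmia: next key...
        int32_t disp = *table++;                  // ...and its displacement
        if (k == key) {                           // "cmp r_key, ..."
          return disp;                            // "add pc, r_disp" via IT eq
        }
      }
      return 0;
    }
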
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index 65dee807a1..2c0cead6ca 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -49,7 +49,7 @@ class ArmMir2Lir : public Mir2Lir {
bool IsFpReg(int reg);
bool SameRegType(int reg1, int reg2);
int AllocTypedTemp(bool fp_hint, int reg_class);
- int AllocTypedTempPair(bool fp_hint, int reg_class);
+ RegStorage AllocTypedTempWide(bool fp_hint, int reg_class);
int S2d(int low_reg, int high_reg);
int TargetReg(SpecialTargetRegister reg);
int GetArgMappingToPhysicalReg(int arg_num);
diff --git a/compiler/dex/quick/arm/fp_arm.cc b/compiler/dex/quick/arm/fp_arm.cc
index 46542e118c..dd0a429a85 100644
--- a/compiler/dex/quick/arm/fp_arm.cc
+++ b/compiler/dex/quick/arm/fp_arm.cc
@@ -63,7 +63,7 @@ void ArmMir2Lir::GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
rl_src1 = LoadValue(rl_src1, kFPReg);
rl_src2 = LoadValue(rl_src2, kFPReg);
rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR3(op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
+ NewLIR3(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
StoreValue(rl_dest, rl_result);
}
@@ -111,8 +111,8 @@ void ArmMir2Lir::GenArithOpDouble(Instruction::Code opcode,
rl_result = EvalLoc(rl_dest, kFPReg, true);
DCHECK(rl_dest.wide);
DCHECK(rl_result.wide);
- NewLIR3(op, S2d(rl_result.low_reg, rl_result.high_reg), S2d(rl_src1.low_reg, rl_src1.high_reg),
- S2d(rl_src2.low_reg, rl_src2.high_reg));
+ NewLIR3(op, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), S2d(rl_src1.reg.GetReg(), rl_src1.reg.GetHighReg()),
+ S2d(rl_src2.reg.GetReg(), rl_src2.reg.GetHighReg()));
StoreValueWide(rl_dest, rl_result);
}
@@ -143,16 +143,16 @@ void ArmMir2Lir::GenConversion(Instruction::Code opcode,
break;
case Instruction::LONG_TO_DOUBLE: {
rl_src = LoadValueWide(rl_src, kFPReg);
- src_reg = S2d(rl_src.low_reg, rl_src.high_reg);
+ src_reg = S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
rl_result = EvalLoc(rl_dest, kFPReg, true);
// TODO: clean up AllocTempDouble so that its result has the double bits set.
int tmp1 = AllocTempDouble();
int tmp2 = AllocTempDouble();
NewLIR2(kThumb2VcvtF64S32, tmp1 | ARM_FP_DOUBLE, (src_reg & ~ARM_FP_DOUBLE) + 1);
- NewLIR2(kThumb2VcvtF64U32, S2d(rl_result.low_reg, rl_result.high_reg), (src_reg & ~ARM_FP_DOUBLE));
+ NewLIR2(kThumb2VcvtF64U32, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), (src_reg & ~ARM_FP_DOUBLE));
LoadConstantWide(tmp2, tmp2 + 1, 0x41f0000000000000LL);
- NewLIR3(kThumb2VmlaF64, S2d(rl_result.low_reg, rl_result.high_reg), tmp1 | ARM_FP_DOUBLE,
+ NewLIR3(kThumb2VmlaF64, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), tmp1 | ARM_FP_DOUBLE,
tmp2 | ARM_FP_DOUBLE);
FreeTemp(tmp1);
FreeTemp(tmp2);
@@ -173,18 +173,18 @@ void ArmMir2Lir::GenConversion(Instruction::Code opcode,
}
if (rl_src.wide) {
rl_src = LoadValueWide(rl_src, kFPReg);
- src_reg = S2d(rl_src.low_reg, rl_src.high_reg);
+ src_reg = S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
} else {
rl_src = LoadValue(rl_src, kFPReg);
- src_reg = rl_src.low_reg;
+ src_reg = rl_src.reg.GetReg();
}
if (rl_dest.wide) {
rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR2(op, S2d(rl_result.low_reg, rl_result.high_reg), src_reg);
+ NewLIR2(op, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), src_reg);
StoreValueWide(rl_dest, rl_result);
} else {
rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR2(op, rl_result.low_reg, src_reg);
+ NewLIR2(op, rl_result.reg.GetReg(), src_reg);
StoreValue(rl_dest, rl_result);
}
}
@@ -199,14 +199,14 @@ void ArmMir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias,
rl_src2 = mir_graph_->GetSrcWide(mir, 2);
rl_src1 = LoadValueWide(rl_src1, kFPReg);
rl_src2 = LoadValueWide(rl_src2, kFPReg);
- NewLIR2(kThumb2Vcmpd, S2d(rl_src1.low_reg, rl_src2.high_reg),
- S2d(rl_src2.low_reg, rl_src2.high_reg));
+ NewLIR2(kThumb2Vcmpd, S2d(rl_src1.reg.GetReg(), rl_src1.reg.GetHighReg()),
+ S2d(rl_src2.reg.GetReg(), rl_src2.reg.GetHighReg()));
} else {
rl_src1 = mir_graph_->GetSrc(mir, 0);
rl_src2 = mir_graph_->GetSrc(mir, 1);
rl_src1 = LoadValue(rl_src1, kFPReg);
rl_src2 = LoadValue(rl_src2, kFPReg);
- NewLIR2(kThumb2Vcmps, rl_src1.low_reg, rl_src2.low_reg);
+ NewLIR2(kThumb2Vcmps, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
}
NewLIR0(kThumb2Fmstat);
ConditionCode ccode = mir->meta.ccode;
@@ -273,28 +273,28 @@ void ArmMir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest,
// In case result vreg is also a src vreg, break association to avoid useless copy by EvalLoc()
ClobberSReg(rl_dest.s_reg_low);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- LoadConstant(rl_result.low_reg, default_result);
- NewLIR2(kThumb2Vcmpd, S2d(rl_src1.low_reg, rl_src2.high_reg),
- S2d(rl_src2.low_reg, rl_src2.high_reg));
+ LoadConstant(rl_result.reg.GetReg(), default_result);
+ NewLIR2(kThumb2Vcmpd, S2d(rl_src1.reg.GetReg(), rl_src1.reg.GetHighReg()),
+ S2d(rl_src2.reg.GetReg(), rl_src2.reg.GetHighReg()));
} else {
rl_src1 = LoadValue(rl_src1, kFPReg);
rl_src2 = LoadValue(rl_src2, kFPReg);
// In case result vreg is also a srcvreg, break association to avoid useless copy by EvalLoc()
ClobberSReg(rl_dest.s_reg_low);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- LoadConstant(rl_result.low_reg, default_result);
- NewLIR2(kThumb2Vcmps, rl_src1.low_reg, rl_src2.low_reg);
+ LoadConstant(rl_result.reg.GetReg(), default_result);
+ NewLIR2(kThumb2Vcmps, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
}
- DCHECK(!ARM_FPREG(rl_result.low_reg));
+ DCHECK(!ARM_FPREG(rl_result.reg.GetReg()));
NewLIR0(kThumb2Fmstat);
OpIT((default_result == -1) ? kCondGt : kCondMi, "");
- NewLIR2(kThumb2MovI8M, rl_result.low_reg,
+ NewLIR2(kThumb2MovI8M, rl_result.reg.GetReg(),
ModifiedImmediate(-default_result)); // Must not alter ccodes
GenBarrier();
OpIT(kCondEq, "");
- LoadConstant(rl_result.low_reg, 0);
+ LoadConstant(rl_result.reg.GetReg(), 0);
GenBarrier();
StoreValue(rl_dest, rl_result);
@@ -304,7 +304,7 @@ void ArmMir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src) {
RegLocation rl_result;
rl_src = LoadValue(rl_src, kFPReg);
rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR2(kThumb2Vnegs, rl_result.low_reg, rl_src.low_reg);
+ NewLIR2(kThumb2Vnegs, rl_result.reg.GetReg(), rl_src.reg.GetReg());
StoreValue(rl_dest, rl_result);
}
@@ -312,8 +312,8 @@ void ArmMir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) {
RegLocation rl_result;
rl_src = LoadValueWide(rl_src, kFPReg);
rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR2(kThumb2Vnegd, S2d(rl_result.low_reg, rl_result.high_reg),
- S2d(rl_src.low_reg, rl_src.high_reg));
+ NewLIR2(kThumb2Vnegd, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()),
+ S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg()));
StoreValueWide(rl_dest, rl_result);
}
@@ -324,18 +324,18 @@ bool ArmMir2Lir::GenInlinedSqrt(CallInfo* info) {
RegLocation rl_dest = InlineTargetWide(info); // double place for result
rl_src = LoadValueWide(rl_src, kFPReg);
RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR2(kThumb2Vsqrtd, S2d(rl_result.low_reg, rl_result.high_reg),
- S2d(rl_src.low_reg, rl_src.high_reg));
- NewLIR2(kThumb2Vcmpd, S2d(rl_result.low_reg, rl_result.high_reg),
- S2d(rl_result.low_reg, rl_result.high_reg));
+ NewLIR2(kThumb2Vsqrtd, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()),
+ S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg()));
+ NewLIR2(kThumb2Vcmpd, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()),
+ S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()));
NewLIR0(kThumb2Fmstat);
branch = NewLIR2(kThumbBCond, 0, kArmCondEq);
ClobberCallerSave();
LockCallTemps(); // Using fixed registers
int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pSqrt));
- NewLIR3(kThumb2Fmrrd, r0, r1, S2d(rl_src.low_reg, rl_src.high_reg));
+ NewLIR3(kThumb2Fmrrd, r0, r1, S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg()));
NewLIR1(kThumbBlxR, r_tgt);
- NewLIR3(kThumb2Fmdrr, S2d(rl_result.low_reg, rl_result.high_reg), r0, r1);
+ NewLIR3(kThumb2Fmdrr, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), r0, r1);
branch->target = NewLIR0(kPseudoTargetLabel);
StoreValueWide(rl_dest, rl_result);
return true;
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index 43928fc5e2..94c8844dc0 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -90,10 +90,10 @@ void ArmMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
int t_reg = AllocTemp();
LoadConstant(t_reg, -1);
- OpRegReg(kOpCmp, rl_src1.high_reg, rl_src2.high_reg);
+ OpRegReg(kOpCmp, rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
LIR* branch1 = OpCondBranch(kCondLt, NULL);
LIR* branch2 = OpCondBranch(kCondGt, NULL);
- OpRegRegReg(kOpSub, t_reg, rl_src1.low_reg, rl_src2.low_reg);
+ OpRegRegReg(kOpSub, t_reg, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
LIR* branch3 = OpCondBranch(kCondEq, NULL);
OpIT(kCondHi, "E");
@@ -107,7 +107,7 @@ void ArmMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
target1 = NewLIR0(kPseudoTargetLabel);
RegLocation rl_temp = LocCReturn(); // Just using as template, will change
- rl_temp.low_reg = t_reg;
+ rl_temp.reg.SetReg(t_reg);
StoreValue(rl_dest, rl_temp);
FreeTemp(t_reg);
@@ -125,8 +125,8 @@ void ArmMir2Lir::GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1,
LIR* taken = &block_label_list_[bb->taken];
LIR* not_taken = &block_label_list_[bb->fall_through];
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
- int32_t low_reg = rl_src1.low_reg;
- int32_t high_reg = rl_src1.high_reg;
+ int32_t low_reg = rl_src1.reg.GetReg();
+ int32_t high_reg = rl_src1.reg.GetHighReg();
if (val == 0 && (ccode == kCondEq || ccode == kCondNe)) {
int t_reg = AllocTemp();
@@ -178,15 +178,15 @@ void ArmMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
int false_val = mir->dalvikInsn.vC;
rl_result = EvalLoc(rl_dest, kCoreReg, true);
if ((true_val == 1) && (false_val == 0)) {
- OpRegRegImm(kOpRsub, rl_result.low_reg, rl_src.low_reg, 1);
+ OpRegRegImm(kOpRsub, rl_result.reg.GetReg(), rl_src.reg.GetReg(), 1);
OpIT(kCondUlt, "");
- LoadConstant(rl_result.low_reg, 0);
+ LoadConstant(rl_result.reg.GetReg(), 0);
GenBarrier(); // Add a scheduling barrier to keep the IT shadow intact
} else if (InexpensiveConstantInt(true_val) && InexpensiveConstantInt(false_val)) {
- OpRegImm(kOpCmp, rl_src.low_reg, 0);
+ OpRegImm(kOpCmp, rl_src.reg.GetReg(), 0);
OpIT(kCondEq, "E");
- LoadConstant(rl_result.low_reg, true_val);
- LoadConstant(rl_result.low_reg, false_val);
+ LoadConstant(rl_result.reg.GetReg(), true_val);
+ LoadConstant(rl_result.reg.GetReg(), false_val);
GenBarrier(); // Add a scheduling barrier to keep the IT shadow intact
} else {
// Unlikely case - could be tuned.
@@ -194,10 +194,10 @@ void ArmMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
int t_reg2 = AllocTemp();
LoadConstant(t_reg1, true_val);
LoadConstant(t_reg2, false_val);
- OpRegImm(kOpCmp, rl_src.low_reg, 0);
+ OpRegImm(kOpCmp, rl_src.reg.GetReg(), 0);
OpIT(kCondEq, "E");
- OpRegCopy(rl_result.low_reg, t_reg1);
- OpRegCopy(rl_result.low_reg, t_reg2);
+ OpRegCopy(rl_result.reg.GetReg(), t_reg1);
+ OpRegCopy(rl_result.reg.GetReg(), t_reg2);
GenBarrier(); // Add a scheduling barrier to keep the IT shadow intact
}
} else {
@@ -207,17 +207,17 @@ void ArmMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
rl_true = LoadValue(rl_true, kCoreReg);
rl_false = LoadValue(rl_false, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegImm(kOpCmp, rl_src.low_reg, 0);
- if (rl_result.low_reg == rl_true.low_reg) { // Is the "true" case already in place?
+ OpRegImm(kOpCmp, rl_src.reg.GetReg(), 0);
+ if (rl_result.reg.GetReg() == rl_true.reg.GetReg()) { // Is the "true" case already in place?
OpIT(kCondNe, "");
- OpRegCopy(rl_result.low_reg, rl_false.low_reg);
- } else if (rl_result.low_reg == rl_false.low_reg) { // False case in place?
+ OpRegCopy(rl_result.reg.GetReg(), rl_false.reg.GetReg());
+ } else if (rl_result.reg.GetReg() == rl_false.reg.GetReg()) { // False case in place?
OpIT(kCondEq, "");
- OpRegCopy(rl_result.low_reg, rl_true.low_reg);
+ OpRegCopy(rl_result.reg.GetReg(), rl_true.reg.GetReg());
} else { // Normal - select between the two.
OpIT(kCondEq, "E");
- OpRegCopy(rl_result.low_reg, rl_true.low_reg);
- OpRegCopy(rl_result.low_reg, rl_false.low_reg);
+ OpRegCopy(rl_result.reg.GetReg(), rl_true.reg.GetReg());
+ OpRegCopy(rl_result.reg.GetReg(), rl_false.reg.GetReg());
}
GenBarrier(); // Add a scheduling barrier to keep the IT shadow intact
}
@@ -247,7 +247,7 @@ void ArmMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
LIR* not_taken = &block_label_list_[bb->fall_through];
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
- OpRegReg(kOpCmp, rl_src1.high_reg, rl_src2.high_reg);
+ OpRegReg(kOpCmp, rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
switch (ccode) {
case kCondEq:
OpCondBranch(kCondNe, not_taken);
@@ -278,7 +278,7 @@ void ArmMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
default:
LOG(FATAL) << "Unexpected ccode: " << ccode;
}
- OpRegReg(kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
+ OpRegReg(kOpCmp, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
OpCondBranch(ccode, taken);
}
@@ -415,21 +415,21 @@ bool ArmMir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
int r_hi = AllocTemp();
int r_lo = AllocTemp();
- NewLIR4(kThumb2Smull, r_lo, r_hi, r_magic, rl_src.low_reg);
+ NewLIR4(kThumb2Smull, r_lo, r_hi, r_magic, rl_src.reg.GetReg());
switch (pattern) {
case Divide3:
- OpRegRegRegShift(kOpSub, rl_result.low_reg, r_hi,
- rl_src.low_reg, EncodeShift(kArmAsr, 31));
+ OpRegRegRegShift(kOpSub, rl_result.reg.GetReg(), r_hi,
+ rl_src.reg.GetReg(), EncodeShift(kArmAsr, 31));
break;
case Divide5:
- OpRegRegImm(kOpAsr, r_lo, rl_src.low_reg, 31);
- OpRegRegRegShift(kOpRsub, rl_result.low_reg, r_lo, r_hi,
+ OpRegRegImm(kOpAsr, r_lo, rl_src.reg.GetReg(), 31);
+ OpRegRegRegShift(kOpRsub, rl_result.reg.GetReg(), r_lo, r_hi,
EncodeShift(kArmAsr, magic_table[lit].shift));
break;
case Divide7:
- OpRegReg(kOpAdd, r_hi, rl_src.low_reg);
- OpRegRegImm(kOpAsr, r_lo, rl_src.low_reg, 31);
- OpRegRegRegShift(kOpRsub, rl_result.low_reg, r_lo, r_hi,
+ OpRegReg(kOpAdd, r_hi, rl_src.reg.GetReg());
+ OpRegRegImm(kOpAsr, r_lo, rl_src.reg.GetReg(), 31);
+ OpRegRegRegShift(kOpRsub, rl_result.reg.GetReg(), r_lo, r_hi,
EncodeShift(kArmAsr, magic_table[lit].shift));
break;
default:
@@ -476,7 +476,7 @@ RegLocation ArmMir2Lir::GenDivRem(RegLocation rl_dest, int reg1, int reg2,
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (is_div) {
// Simple case, use sdiv instruction.
- OpRegRegReg(kOpDiv, rl_result.low_reg, reg1, reg2);
+ OpRegRegReg(kOpDiv, rl_result.reg.GetReg(), reg1, reg2);
} else {
// Remainder case, use the following code:
// temp = reg1 / reg2 - integer division
@@ -486,7 +486,7 @@ RegLocation ArmMir2Lir::GenDivRem(RegLocation rl_dest, int reg1, int reg2,
int temp = AllocTemp();
OpRegRegReg(kOpDiv, temp, reg1, reg2);
OpRegReg(kOpMul, temp, reg2);
- OpRegRegReg(kOpSub, rl_result.low_reg, reg1, temp);
+ OpRegRegReg(kOpSub, rl_result.reg.GetReg(), reg1, temp);
FreeTemp(temp);
}
@@ -501,10 +501,10 @@ bool ArmMir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min) {
rl_src2 = LoadValue(rl_src2, kCoreReg);
RegLocation rl_dest = InlineTarget(info);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegReg(kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
+ OpRegReg(kOpCmp, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
OpIT((is_min) ? kCondGt : kCondLt, "E");
- OpRegReg(kOpMov, rl_result.low_reg, rl_src2.low_reg);
- OpRegReg(kOpMov, rl_result.low_reg, rl_src1.low_reg);
+ OpRegReg(kOpMov, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
+ OpRegReg(kOpMov, rl_result.reg.GetReg(), rl_src1.reg.GetReg());
GenBarrier();
StoreValue(rl_dest, rl_result);
return true;
@@ -518,18 +518,18 @@ bool ArmMir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (size == kLong) {
// Fake unaligned LDRD by two unaligned LDR instructions on ARMv7 with SCTLR.A set to 0.
- if (rl_address.low_reg != rl_result.low_reg) {
- LoadBaseDisp(rl_address.low_reg, 0, rl_result.low_reg, kWord, INVALID_SREG);
- LoadBaseDisp(rl_address.low_reg, 4, rl_result.high_reg, kWord, INVALID_SREG);
+ if (rl_address.reg.GetReg() != rl_result.reg.GetReg()) {
+ LoadBaseDisp(rl_address.reg.GetReg(), 0, rl_result.reg.GetReg(), kWord, INVALID_SREG);
+ LoadBaseDisp(rl_address.reg.GetReg(), 4, rl_result.reg.GetHighReg(), kWord, INVALID_SREG);
} else {
- LoadBaseDisp(rl_address.low_reg, 4, rl_result.high_reg, kWord, INVALID_SREG);
- LoadBaseDisp(rl_address.low_reg, 0, rl_result.low_reg, kWord, INVALID_SREG);
+ LoadBaseDisp(rl_address.reg.GetReg(), 4, rl_result.reg.GetHighReg(), kWord, INVALID_SREG);
+ LoadBaseDisp(rl_address.reg.GetReg(), 0, rl_result.reg.GetReg(), kWord, INVALID_SREG);
}
StoreValueWide(rl_dest, rl_result);
} else {
DCHECK(size == kSignedByte || size == kSignedHalf || size == kWord);
// Unaligned load with LDR and LDRSH is allowed on ARMv7 with SCTLR.A set to 0.
- LoadBaseDisp(rl_address.low_reg, 0, rl_result.low_reg, size, INVALID_SREG);
+ LoadBaseDisp(rl_address.reg.GetReg(), 0, rl_result.reg.GetReg(), size, INVALID_SREG);
StoreValue(rl_dest, rl_result);
}
return true;
@@ -543,13 +543,13 @@ bool ArmMir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
if (size == kLong) {
// Fake unaligned STRD by two unaligned STR instructions on ARMv7 with SCTLR.A set to 0.
RegLocation rl_value = LoadValueWide(rl_src_value, kCoreReg);
- StoreBaseDisp(rl_address.low_reg, 0, rl_value.low_reg, kWord);
- StoreBaseDisp(rl_address.low_reg, 4, rl_value.high_reg, kWord);
+ StoreBaseDisp(rl_address.reg.GetReg(), 0, rl_value.reg.GetReg(), kWord);
+ StoreBaseDisp(rl_address.reg.GetReg(), 4, rl_value.reg.GetHighReg(), kWord);
} else {
DCHECK(size == kSignedByte || size == kSignedHalf || size == kWord);
// Unaligned store with STR and STRH is allowed on ARMv7 with SCTLR.A set to 0.
RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
- StoreBaseDisp(rl_address.low_reg, 0, rl_value.low_reg, size);
+ StoreBaseDisp(rl_address.reg.GetReg(), 0, rl_value.reg.GetReg(), size);
}
return true;
}
@@ -589,24 +589,24 @@ bool ArmMir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
bool load_early = true;
if (is_long) {
bool expected_is_core_reg =
- rl_src_expected.location == kLocPhysReg && !IsFpReg(rl_src_expected.low_reg);
+ rl_src_expected.location == kLocPhysReg && !IsFpReg(rl_src_expected.reg.GetReg());
bool new_value_is_core_reg =
- rl_src_new_value.location == kLocPhysReg && !IsFpReg(rl_src_new_value.low_reg);
- bool expected_is_good_reg = expected_is_core_reg && !IsTemp(rl_src_expected.low_reg);
- bool new_value_is_good_reg = new_value_is_core_reg && !IsTemp(rl_src_new_value.low_reg);
+ rl_src_new_value.location == kLocPhysReg && !IsFpReg(rl_src_new_value.reg.GetReg());
+ bool expected_is_good_reg = expected_is_core_reg && !IsTemp(rl_src_expected.reg.GetReg());
+ bool new_value_is_good_reg = new_value_is_core_reg && !IsTemp(rl_src_new_value.reg.GetReg());
if (!expected_is_good_reg && !new_value_is_good_reg) {
// None of expected/new_value is non-temp reg, need to load both late
load_early = false;
// Make sure they are not in the temp regs and the load will not be skipped.
if (expected_is_core_reg) {
- FlushRegWide(rl_src_expected.low_reg, rl_src_expected.high_reg);
+ FlushRegWide(rl_src_expected.reg.GetReg(), rl_src_expected.reg.GetHighReg());
ClobberSReg(rl_src_expected.s_reg_low);
ClobberSReg(GetSRegHi(rl_src_expected.s_reg_low));
rl_src_expected.location = kLocDalvikFrame;
}
if (new_value_is_core_reg) {
- FlushRegWide(rl_src_new_value.low_reg, rl_src_new_value.high_reg);
+ FlushRegWide(rl_src_new_value.reg.GetReg(), rl_src_new_value.reg.GetHighReg());
ClobberSReg(rl_src_new_value.s_reg_low);
ClobberSReg(GetSRegHi(rl_src_new_value.s_reg_low));
rl_src_new_value.location = kLocDalvikFrame;
@@ -627,19 +627,19 @@ bool ArmMir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
if (is_object && !mir_graph_->IsConstantNullRef(rl_new_value)) {
// Mark card for object assuming new value is stored.
- MarkGCCard(rl_new_value.low_reg, rl_object.low_reg);
+ MarkGCCard(rl_new_value.reg.GetReg(), rl_object.reg.GetReg());
}
RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
int r_ptr = rARM_LR;
- OpRegRegReg(kOpAdd, r_ptr, rl_object.low_reg, rl_offset.low_reg);
+ OpRegRegReg(kOpAdd, r_ptr, rl_object.reg.GetReg(), rl_offset.reg.GetReg());
// Free now unneeded rl_object and rl_offset to give more temps.
ClobberSReg(rl_object.s_reg_low);
- FreeTemp(rl_object.low_reg);
+ FreeTemp(rl_object.reg.GetReg());
ClobberSReg(rl_offset.s_reg_low);
- FreeTemp(rl_offset.low_reg);
+ FreeTemp(rl_offset.reg.GetReg());
RegLocation rl_expected;
if (!is_long) {
@@ -647,8 +647,11 @@ bool ArmMir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
} else if (load_early) {
rl_expected = LoadValueWide(rl_src_expected, kCoreReg);
} else {
- rl_new_value.low_reg = rl_expected.low_reg = AllocTemp();
- rl_new_value.high_reg = rl_expected.high_reg = AllocTemp();
+ // NOTE: partially defined rl_expected & rl_new_value - but we just want the regs.
+ int low_reg = AllocTemp();
+ int high_reg = AllocTemp();
+ rl_new_value.reg = RegStorage(RegStorage::k64BitPair, low_reg, high_reg);
+ rl_expected = rl_new_value;
}
// do {
@@ -662,13 +665,13 @@ bool ArmMir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
if (is_long) {
int r_tmp_high = AllocTemp();
if (!load_early) {
- LoadValueDirectWide(rl_src_expected, rl_expected.low_reg, rl_expected.high_reg);
+ LoadValueDirectWide(rl_src_expected, rl_expected.reg.GetReg(), rl_expected.reg.GetHighReg());
}
NewLIR3(kThumb2Ldrexd, r_tmp, r_tmp_high, r_ptr);
- OpRegReg(kOpSub, r_tmp, rl_expected.low_reg);
- OpRegReg(kOpSub, r_tmp_high, rl_expected.high_reg);
+ OpRegReg(kOpSub, r_tmp, rl_expected.reg.GetReg());
+ OpRegReg(kOpSub, r_tmp_high, rl_expected.reg.GetHighReg());
if (!load_early) {
- LoadValueDirectWide(rl_src_new_value, rl_new_value.low_reg, rl_new_value.high_reg);
+ LoadValueDirectWide(rl_src_new_value, rl_new_value.reg.GetReg(), rl_new_value.reg.GetHighReg());
}
// Make sure we use ORR that sets the ccode
if (ARM_LOWREG(r_tmp) && ARM_LOWREG(r_tmp_high)) {
@@ -680,14 +683,14 @@ bool ArmMir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
OpIT(kCondEq, "T");
- NewLIR4(kThumb2Strexd /* eq */, r_tmp, rl_new_value.low_reg, rl_new_value.high_reg, r_ptr);
+ NewLIR4(kThumb2Strexd /* eq */, r_tmp, rl_new_value.reg.GetReg(), rl_new_value.reg.GetHighReg(), r_ptr);
} else {
NewLIR3(kThumb2Ldrex, r_tmp, r_ptr, 0);
- OpRegReg(kOpSub, r_tmp, rl_expected.low_reg);
+ OpRegReg(kOpSub, r_tmp, rl_expected.reg.GetReg());
DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
OpIT(kCondEq, "T");
- NewLIR4(kThumb2Strex /* eq */, r_tmp, rl_new_value.low_reg, r_ptr, 0);
+ NewLIR4(kThumb2Strex /* eq */, r_tmp, rl_new_value.reg.GetReg(), r_ptr, 0);
}
// Still one conditional left from OpIT(kCondEq, "T") from either branch
@@ -695,16 +698,16 @@ bool ArmMir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
OpCondBranch(kCondEq, target);
if (!load_early) {
- FreeTemp(rl_expected.low_reg); // Now unneeded.
- FreeTemp(rl_expected.high_reg); // Now unneeded.
+ FreeTemp(rl_expected.reg.GetReg()); // Now unneeded.
+ FreeTemp(rl_expected.reg.GetHighReg()); // Now unneeded.
}
// result := (tmp1 != 0) ? 0 : 1;
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegImm(kOpRsub, rl_result.low_reg, r_tmp, 1);
+ OpRegRegImm(kOpRsub, rl_result.reg.GetReg(), r_tmp, 1);
DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
OpIT(kCondUlt, "");
- LoadConstant(rl_result.low_reg, 0); /* cc */
+ LoadConstant(rl_result.reg.GetReg(), 0); /* cc */
FreeTemp(r_tmp); // Now unneeded.
StoreValue(rl_dest, rl_result);
@@ -730,10 +733,10 @@ LIR* ArmMir2Lir::OpVstm(int rBase, int count) {
void ArmMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
RegLocation rl_result, int lit,
int first_bit, int second_bit) {
- OpRegRegRegShift(kOpAdd, rl_result.low_reg, rl_src.low_reg, rl_src.low_reg,
+ OpRegRegRegShift(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), rl_src.reg.GetReg(),
EncodeShift(kArmLsl, second_bit - first_bit));
if (first_bit != 0) {
- OpRegRegImm(kOpLsl, rl_result.low_reg, rl_result.low_reg, first_bit);
+ OpRegRegImm(kOpLsl, rl_result.reg.GetReg(), rl_result.reg.GetReg(), first_bit);
}
}
@@ -753,7 +756,8 @@ LIR* ArmMir2Lir::OpTestSuspend(LIR* target) {
// Decrement register and branch on condition
LIR* ArmMir2Lir::OpDecAndBranch(ConditionCode c_code, int reg, LIR* target) {
// Combine sub & test using sub setflags encoding here
- NewLIR3(kThumb2SubsRRI12, reg, reg, 1);
+ OpRegRegImm(kOpSub, reg, reg, 1); // For value == 1, this should set flags.
+ DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
return OpCondBranch(c_code, target);
}
@@ -782,14 +786,14 @@ void ArmMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
int z_reg = AllocTemp();
LoadConstantNoClobber(z_reg, 0);
// Check for destructive overlap
- if (rl_result.low_reg == rl_src.high_reg) {
+ if (rl_result.reg.GetReg() == rl_src.reg.GetHighReg()) {
int t_reg = AllocTemp();
- OpRegRegReg(kOpSub, rl_result.low_reg, z_reg, rl_src.low_reg);
- OpRegRegReg(kOpSbc, rl_result.high_reg, z_reg, t_reg);
+ OpRegRegReg(kOpSub, rl_result.reg.GetReg(), z_reg, rl_src.reg.GetReg());
+ OpRegRegReg(kOpSbc, rl_result.reg.GetHighReg(), z_reg, t_reg);
FreeTemp(t_reg);
} else {
- OpRegRegReg(kOpSub, rl_result.low_reg, z_reg, rl_src.low_reg);
- OpRegRegReg(kOpSbc, rl_result.high_reg, z_reg, rl_src.high_reg);
+ OpRegRegReg(kOpSub, rl_result.reg.GetReg(), z_reg, rl_src.reg.GetReg());
+ OpRegRegReg(kOpSbc, rl_result.reg.GetHighReg(), z_reg, rl_src.reg.GetHighReg());
}
FreeTemp(z_reg);
StoreValueWide(rl_dest, rl_result);
@@ -827,41 +831,41 @@ void ArmMir2Lir::GenMulLong(Instruction::Code opcode, RegLocation rl_dest,
bool special_case = true;
// If operands are the same, or any pair has been promoted we're not the special case.
if ((rl_src1.s_reg_low == rl_src2.s_reg_low) ||
- (!IsTemp(rl_src1.low_reg) && !IsTemp(rl_src1.high_reg)) ||
- (!IsTemp(rl_src2.low_reg) && !IsTemp(rl_src2.high_reg))) {
+ (!IsTemp(rl_src1.reg.GetReg()) && !IsTemp(rl_src1.reg.GetHighReg())) ||
+ (!IsTemp(rl_src2.reg.GetReg()) && !IsTemp(rl_src2.reg.GetHighReg()))) {
special_case = false;
}
// Tuning: if rl_dest has been promoted and is *not* either operand, could use directly.
int res_lo = AllocTemp();
int res_hi;
- if (rl_src1.low_reg == rl_src2.low_reg) {
+ if (rl_src1.reg.GetReg() == rl_src2.reg.GetReg()) {
res_hi = AllocTemp();
- NewLIR3(kThumb2MulRRR, tmp1, rl_src1.low_reg, rl_src1.high_reg);
- NewLIR4(kThumb2Umull, res_lo, res_hi, rl_src1.low_reg, rl_src1.low_reg);
+ NewLIR3(kThumb2MulRRR, tmp1, rl_src1.reg.GetReg(), rl_src1.reg.GetHighReg());
+ NewLIR4(kThumb2Umull, res_lo, res_hi, rl_src1.reg.GetReg(), rl_src1.reg.GetReg());
OpRegRegRegShift(kOpAdd, res_hi, res_hi, tmp1, EncodeShift(kArmLsl, 1));
} else {
// In the special case, all temps are now allocated
- NewLIR3(kThumb2MulRRR, tmp1, rl_src2.low_reg, rl_src1.high_reg);
+ NewLIR3(kThumb2MulRRR, tmp1, rl_src2.reg.GetReg(), rl_src1.reg.GetHighReg());
if (special_case) {
- DCHECK_NE(rl_src1.low_reg, rl_src2.low_reg);
- DCHECK_NE(rl_src1.high_reg, rl_src2.high_reg);
- FreeTemp(rl_src1.high_reg);
+ DCHECK_NE(rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+ DCHECK_NE(rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
+ FreeTemp(rl_src1.reg.GetHighReg());
}
res_hi = AllocTemp();
- NewLIR4(kThumb2Umull, res_lo, res_hi, rl_src2.low_reg, rl_src1.low_reg);
- NewLIR4(kThumb2Mla, tmp1, rl_src1.low_reg, rl_src2.high_reg, tmp1);
+ NewLIR4(kThumb2Umull, res_lo, res_hi, rl_src2.reg.GetReg(), rl_src1.reg.GetReg());
+ NewLIR4(kThumb2Mla, tmp1, rl_src1.reg.GetReg(), rl_src2.reg.GetHighReg(), tmp1);
NewLIR4(kThumb2AddRRR, res_hi, tmp1, res_hi, 0);
if (special_case) {
- FreeTemp(rl_src1.low_reg);
- Clobber(rl_src1.low_reg);
- Clobber(rl_src1.high_reg);
+ FreeTemp(rl_src1.reg.GetReg());
+ Clobber(rl_src1.reg.GetReg());
+ Clobber(rl_src1.reg.GetHighReg());
}
}
FreeTemp(tmp1);
rl_result = GetReturnWide(false); // Just using as a template.
- rl_result.low_reg = res_lo;
- rl_result.high_reg = res_hi;
+ rl_result.reg.SetReg(res_lo);
+ rl_result.reg.SetHighReg(res_hi);
StoreValueWide(rl_dest, rl_result);
// Now, restore lr to its non-temp status.
Clobber(rARM_LR);
@@ -920,25 +924,25 @@ void ArmMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
}
/* null object? */
- GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);
+ GenNullCheck(rl_array.s_reg_low, rl_array.reg.GetReg(), opt_flags);
bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
int reg_len = INVALID_REG;
if (needs_range_check) {
reg_len = AllocTemp();
/* Get len */
- LoadWordDisp(rl_array.low_reg, len_offset, reg_len);
+ LoadWordDisp(rl_array.reg.GetReg(), len_offset, reg_len);
}
if (rl_dest.wide || rl_dest.fp || constant_index) {
int reg_ptr;
if (constant_index) {
- reg_ptr = rl_array.low_reg; // NOTE: must not alter reg_ptr in constant case.
+ reg_ptr = rl_array.reg.GetReg(); // NOTE: must not alter reg_ptr in constant case.
} else {
// No special indexed operation, lea + load w/ displacement
reg_ptr = AllocTemp();
- OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.low_reg, rl_index.low_reg,
+ OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg.GetReg(), rl_index.reg.GetReg(),
EncodeShift(kArmLsl, scale));
- FreeTemp(rl_index.low_reg);
+ FreeTemp(rl_index.reg.GetReg());
}
rl_result = EvalLoc(rl_dest, reg_class, true);
@@ -946,18 +950,18 @@ void ArmMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
if (constant_index) {
GenImmedCheck(kCondLs, reg_len, mir_graph_->ConstantValue(rl_index), kThrowConstantArrayBounds);
} else {
- GenRegRegCheck(kCondLs, reg_len, rl_index.low_reg, kThrowArrayBounds);
+ GenRegRegCheck(kCondLs, reg_len, rl_index.reg.GetReg(), kThrowArrayBounds);
}
FreeTemp(reg_len);
}
if (rl_dest.wide) {
- LoadBaseDispWide(reg_ptr, data_offset, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
+ LoadBaseDispWide(reg_ptr, data_offset, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), INVALID_SREG);
if (!constant_index) {
FreeTemp(reg_ptr);
}
StoreValueWide(rl_dest, rl_result);
} else {
- LoadBaseDisp(reg_ptr, data_offset, rl_result.low_reg, size, INVALID_SREG);
+ LoadBaseDisp(reg_ptr, data_offset, rl_result.reg.GetReg(), size, INVALID_SREG);
if (!constant_index) {
FreeTemp(reg_ptr);
}
@@ -966,15 +970,15 @@ void ArmMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
} else {
// Offset base, then use indexed load
int reg_ptr = AllocTemp();
- OpRegRegImm(kOpAdd, reg_ptr, rl_array.low_reg, data_offset);
- FreeTemp(rl_array.low_reg);
+ OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg.GetReg(), data_offset);
+ FreeTemp(rl_array.reg.GetReg());
rl_result = EvalLoc(rl_dest, reg_class, true);
if (needs_range_check) {
- GenRegRegCheck(kCondUge, rl_index.low_reg, reg_len, kThrowArrayBounds);
+ GenRegRegCheck(kCondUge, rl_index.reg.GetReg(), reg_len, kThrowArrayBounds);
FreeTemp(reg_len);
}
- LoadBaseIndexed(reg_ptr, rl_index.low_reg, rl_result.low_reg, scale, size);
+ LoadBaseIndexed(reg_ptr, rl_index.reg.GetReg(), rl_result.reg.GetReg(), scale, size);
FreeTemp(reg_ptr);
StoreValue(rl_dest, rl_result);
}
@@ -1010,17 +1014,17 @@ void ArmMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
int reg_ptr;
bool allocated_reg_ptr_temp = false;
if (constant_index) {
- reg_ptr = rl_array.low_reg;
- } else if (IsTemp(rl_array.low_reg) && !card_mark) {
- Clobber(rl_array.low_reg);
- reg_ptr = rl_array.low_reg;
+ reg_ptr = rl_array.reg.GetReg();
+ } else if (IsTemp(rl_array.reg.GetReg()) && !card_mark) {
+ Clobber(rl_array.reg.GetReg());
+ reg_ptr = rl_array.reg.GetReg();
} else {
allocated_reg_ptr_temp = true;
reg_ptr = AllocTemp();
}
/* null object? */
- GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);
+ GenNullCheck(rl_array.s_reg_low, rl_array.reg.GetReg(), opt_flags);
bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
int reg_len = INVALID_REG;
@@ -1028,7 +1032,7 @@ void ArmMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
reg_len = AllocTemp();
// NOTE: max live temps(4) here.
/* Get len */
- LoadWordDisp(rl_array.low_reg, len_offset, reg_len);
+ LoadWordDisp(rl_array.reg.GetReg(), len_offset, reg_len);
}
/* at this point, reg_ptr points to array, 2 live temps */
if (rl_src.wide || rl_src.fp || constant_index) {
@@ -1038,39 +1042,39 @@ void ArmMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
rl_src = LoadValue(rl_src, reg_class);
}
if (!constant_index) {
- OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.low_reg, rl_index.low_reg,
+ OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg.GetReg(), rl_index.reg.GetReg(),
EncodeShift(kArmLsl, scale));
}
if (needs_range_check) {
if (constant_index) {
GenImmedCheck(kCondLs, reg_len, mir_graph_->ConstantValue(rl_index), kThrowConstantArrayBounds);
} else {
- GenRegRegCheck(kCondLs, reg_len, rl_index.low_reg, kThrowArrayBounds);
+ GenRegRegCheck(kCondLs, reg_len, rl_index.reg.GetReg(), kThrowArrayBounds);
}
FreeTemp(reg_len);
}
if (rl_src.wide) {
- StoreBaseDispWide(reg_ptr, data_offset, rl_src.low_reg, rl_src.high_reg);
+ StoreBaseDispWide(reg_ptr, data_offset, rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
} else {
- StoreBaseDisp(reg_ptr, data_offset, rl_src.low_reg, size);
+ StoreBaseDisp(reg_ptr, data_offset, rl_src.reg.GetReg(), size);
}
} else {
/* reg_ptr -> array data */
- OpRegRegImm(kOpAdd, reg_ptr, rl_array.low_reg, data_offset);
+ OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg.GetReg(), data_offset);
rl_src = LoadValue(rl_src, reg_class);
if (needs_range_check) {
- GenRegRegCheck(kCondUge, rl_index.low_reg, reg_len, kThrowArrayBounds);
+ GenRegRegCheck(kCondUge, rl_index.reg.GetReg(), reg_len, kThrowArrayBounds);
FreeTemp(reg_len);
}
- StoreBaseIndexed(reg_ptr, rl_index.low_reg, rl_src.low_reg,
+ StoreBaseIndexed(reg_ptr, rl_index.reg.GetReg(), rl_src.reg.GetReg(),
scale, size);
}
if (allocated_reg_ptr_temp) {
FreeTemp(reg_ptr);
}
if (card_mark) {
- MarkGCCard(rl_src.low_reg, rl_array.low_reg);
+ MarkGCCard(rl_src.reg.GetReg(), rl_array.reg.GetReg());
}
}
@@ -1093,53 +1097,53 @@ void ArmMir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
case Instruction::SHL_LONG:
case Instruction::SHL_LONG_2ADDR:
if (shift_amount == 1) {
- OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, rl_src.low_reg);
- OpRegRegReg(kOpAdc, rl_result.high_reg, rl_src.high_reg, rl_src.high_reg);
+ OpRegRegReg(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), rl_src.reg.GetReg());
+ OpRegRegReg(kOpAdc, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), rl_src.reg.GetHighReg());
} else if (shift_amount == 32) {
- OpRegCopy(rl_result.high_reg, rl_src.low_reg);
- LoadConstant(rl_result.low_reg, 0);
+ OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetReg());
+ LoadConstant(rl_result.reg.GetReg(), 0);
} else if (shift_amount > 31) {
- OpRegRegImm(kOpLsl, rl_result.high_reg, rl_src.low_reg, shift_amount - 32);
- LoadConstant(rl_result.low_reg, 0);
+ OpRegRegImm(kOpLsl, rl_result.reg.GetHighReg(), rl_src.reg.GetReg(), shift_amount - 32);
+ LoadConstant(rl_result.reg.GetReg(), 0);
} else {
- OpRegRegImm(kOpLsl, rl_result.high_reg, rl_src.high_reg, shift_amount);
- OpRegRegRegShift(kOpOr, rl_result.high_reg, rl_result.high_reg, rl_src.low_reg,
+ OpRegRegImm(kOpLsl, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), shift_amount);
+ OpRegRegRegShift(kOpOr, rl_result.reg.GetHighReg(), rl_result.reg.GetHighReg(), rl_src.reg.GetReg(),
EncodeShift(kArmLsr, 32 - shift_amount));
- OpRegRegImm(kOpLsl, rl_result.low_reg, rl_src.low_reg, shift_amount);
+ OpRegRegImm(kOpLsl, rl_result.reg.GetReg(), rl_src.reg.GetReg(), shift_amount);
}
break;
case Instruction::SHR_LONG:
case Instruction::SHR_LONG_2ADDR:
if (shift_amount == 32) {
- OpRegCopy(rl_result.low_reg, rl_src.high_reg);
- OpRegRegImm(kOpAsr, rl_result.high_reg, rl_src.high_reg, 31);
+ OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetHighReg());
+ OpRegRegImm(kOpAsr, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), 31);
} else if (shift_amount > 31) {
- OpRegRegImm(kOpAsr, rl_result.low_reg, rl_src.high_reg, shift_amount - 32);
- OpRegRegImm(kOpAsr, rl_result.high_reg, rl_src.high_reg, 31);
+ OpRegRegImm(kOpAsr, rl_result.reg.GetReg(), rl_src.reg.GetHighReg(), shift_amount - 32);
+ OpRegRegImm(kOpAsr, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), 31);
} else {
int t_reg = AllocTemp();
- OpRegRegImm(kOpLsr, t_reg, rl_src.low_reg, shift_amount);
- OpRegRegRegShift(kOpOr, rl_result.low_reg, t_reg, rl_src.high_reg,
+ OpRegRegImm(kOpLsr, t_reg, rl_src.reg.GetReg(), shift_amount);
+ OpRegRegRegShift(kOpOr, rl_result.reg.GetReg(), t_reg, rl_src.reg.GetHighReg(),
EncodeShift(kArmLsl, 32 - shift_amount));
FreeTemp(t_reg);
- OpRegRegImm(kOpAsr, rl_result.high_reg, rl_src.high_reg, shift_amount);
+ OpRegRegImm(kOpAsr, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), shift_amount);
}
break;
case Instruction::USHR_LONG:
case Instruction::USHR_LONG_2ADDR:
if (shift_amount == 32) {
- OpRegCopy(rl_result.low_reg, rl_src.high_reg);
- LoadConstant(rl_result.high_reg, 0);
+ OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetHighReg());
+ LoadConstant(rl_result.reg.GetHighReg(), 0);
} else if (shift_amount > 31) {
- OpRegRegImm(kOpLsr, rl_result.low_reg, rl_src.high_reg, shift_amount - 32);
- LoadConstant(rl_result.high_reg, 0);
+ OpRegRegImm(kOpLsr, rl_result.reg.GetReg(), rl_src.reg.GetHighReg(), shift_amount - 32);
+ LoadConstant(rl_result.reg.GetHighReg(), 0);
} else {
int t_reg = AllocTemp();
- OpRegRegImm(kOpLsr, t_reg, rl_src.low_reg, shift_amount);
- OpRegRegRegShift(kOpOr, rl_result.low_reg, t_reg, rl_src.high_reg,
+ OpRegRegImm(kOpLsr, t_reg, rl_src.reg.GetReg(), shift_amount);
+ OpRegRegRegShift(kOpOr, rl_result.reg.GetReg(), t_reg, rl_src.reg.GetHighReg(),
EncodeShift(kArmLsl, 32 - shift_amount));
FreeTemp(t_reg);
- OpRegRegImm(kOpLsr, rl_result.high_reg, rl_src.high_reg, shift_amount);
+ OpRegRegImm(kOpLsr, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), shift_amount);
}
break;
default:
@@ -1194,36 +1198,36 @@ void ArmMir2Lir::GenArithImmOpLong(Instruction::Code opcode,
switch (opcode) {
case Instruction::ADD_LONG:
case Instruction::ADD_LONG_2ADDR:
- NewLIR3(kThumb2AddRRI8M, rl_result.low_reg, rl_src1.low_reg, mod_imm_lo);
- NewLIR3(kThumb2AdcRRI8M, rl_result.high_reg, rl_src1.high_reg, mod_imm_hi);
+ NewLIR3(kThumb2AddRRI8M, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), mod_imm_lo);
+ NewLIR3(kThumb2AdcRRI8M, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), mod_imm_hi);
break;
case Instruction::OR_LONG:
case Instruction::OR_LONG_2ADDR:
- if ((val_lo != 0) || (rl_result.low_reg != rl_src1.low_reg)) {
- OpRegRegImm(kOpOr, rl_result.low_reg, rl_src1.low_reg, val_lo);
+ if ((val_lo != 0) || (rl_result.reg.GetReg() != rl_src1.reg.GetReg())) {
+ OpRegRegImm(kOpOr, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), val_lo);
}
- if ((val_hi != 0) || (rl_result.high_reg != rl_src1.high_reg)) {
- OpRegRegImm(kOpOr, rl_result.high_reg, rl_src1.high_reg, val_hi);
+ if ((val_hi != 0) || (rl_result.reg.GetHighReg() != rl_src1.reg.GetHighReg())) {
+ OpRegRegImm(kOpOr, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), val_hi);
}
break;
case Instruction::XOR_LONG:
case Instruction::XOR_LONG_2ADDR:
- OpRegRegImm(kOpXor, rl_result.low_reg, rl_src1.low_reg, val_lo);
- OpRegRegImm(kOpXor, rl_result.high_reg, rl_src1.high_reg, val_hi);
+ OpRegRegImm(kOpXor, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), val_lo);
+ OpRegRegImm(kOpXor, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), val_hi);
break;
case Instruction::AND_LONG:
case Instruction::AND_LONG_2ADDR:
- if ((val_lo != 0xffffffff) || (rl_result.low_reg != rl_src1.low_reg)) {
- OpRegRegImm(kOpAnd, rl_result.low_reg, rl_src1.low_reg, val_lo);
+ if ((val_lo != 0xffffffff) || (rl_result.reg.GetReg() != rl_src1.reg.GetReg())) {
+ OpRegRegImm(kOpAnd, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), val_lo);
}
- if ((val_hi != 0xffffffff) || (rl_result.high_reg != rl_src1.high_reg)) {
- OpRegRegImm(kOpAnd, rl_result.high_reg, rl_src1.high_reg, val_hi);
+ if ((val_hi != 0xffffffff) || (rl_result.reg.GetHighReg() != rl_src1.reg.GetHighReg())) {
+ OpRegRegImm(kOpAnd, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), val_hi);
}
break;
case Instruction::SUB_LONG_2ADDR:
case Instruction::SUB_LONG:
- NewLIR3(kThumb2SubRRI8M, rl_result.low_reg, rl_src1.low_reg, mod_imm_lo);
- NewLIR3(kThumb2SbcRRI8M, rl_result.high_reg, rl_src1.high_reg, mod_imm_hi);
+ NewLIR3(kThumb2SubRRI8M, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), mod_imm_lo);
+ NewLIR3(kThumb2SbcRRI8M, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), mod_imm_hi);
break;
default:
LOG(FATAL) << "Unexpected opcode " << opcode;
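
The SmallLiteralDivRem and shift conversions above are purely mechanical; the magic-number division sequences themselves are unchanged. As a sanity check on the Divide3 pattern (smull by the magic constant 0x55555556, then subtract the sign bit), the equivalent C++:

    #include <cassert>
    #include <cstdint>

    // Divide3: result = hi32(x * 0x55555556) - (x >> 31).
    int32_t Div3(int32_t x) {
      int64_t product = static_cast<int64_t>(x) * 0x55555556LL;  // kThumb2Smull
      int32_t hi = static_cast<int32_t>(product >> 32);          // r_hi
      return hi - (x >> 31);  // kOpSub with EncodeShift(kArmAsr, 31)
    }

    int main() {
      for (int32_t x = -1000; x <= 1000; ++x) {
        assert(Div3(x) == x / 3);  // matches C++'s truncating division
      }
      return 0;
    }
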
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index 83431ad235..ab1a053489 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -37,23 +37,19 @@ static int fp_temps[] = {fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15};
RegLocation ArmMir2Lir::LocCReturn() {
- RegLocation res = ARM_LOC_C_RETURN;
- return res;
+ return arm_loc_c_return;
}
RegLocation ArmMir2Lir::LocCReturnWide() {
- RegLocation res = ARM_LOC_C_RETURN_WIDE;
- return res;
+ return arm_loc_c_return_wide;
}
RegLocation ArmMir2Lir::LocCReturnFloat() {
- RegLocation res = ARM_LOC_C_RETURN_FLOAT;
- return res;
+ return arm_loc_c_return_float;
}
RegLocation ArmMir2Lir::LocCReturnDouble() {
- RegLocation res = ARM_LOC_C_RETURN_DOUBLE;
- return res;
+ return arm_loc_c_return_double;
}
// Return a target-dependent special register.
@@ -530,14 +526,10 @@ Mir2Lir* ArmCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
return new ArmMir2Lir(cu, mir_graph, arena);
}
-/*
- * Alloc a pair of core registers, or a double. Low reg in low byte,
- * high reg in next byte.
- */
-int ArmMir2Lir::AllocTypedTempPair(bool fp_hint, int reg_class) {
+// Alloc a pair of core registers, or a double.
+RegStorage ArmMir2Lir::AllocTypedTempWide(bool fp_hint, int reg_class) {
int high_reg;
int low_reg;
- int res = 0;
if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
low_reg = AllocTempDouble();
@@ -546,8 +538,7 @@ int ArmMir2Lir::AllocTypedTempPair(bool fp_hint, int reg_class) {
low_reg = AllocTemp();
high_reg = AllocTemp();
}
- res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
- return res;
+ return RegStorage(RegStorage::k64BitPair, low_reg, high_reg);
}
int ArmMir2Lir::AllocTypedTemp(bool fp_hint, int reg_class) {
@@ -594,11 +585,11 @@ void ArmMir2Lir::CompilerInitializeRegAlloc() {
void ArmMir2Lir::FreeRegLocTemps(RegLocation rl_keep,
RegLocation rl_free) {
- if ((rl_free.low_reg != rl_keep.low_reg) && (rl_free.low_reg != rl_keep.high_reg) &&
- (rl_free.high_reg != rl_keep.low_reg) && (rl_free.high_reg != rl_keep.high_reg)) {
+ if ((rl_free.reg.GetReg() != rl_keep.reg.GetReg()) && (rl_free.reg.GetReg() != rl_keep.reg.GetHighReg()) &&
+ (rl_free.reg.GetHighReg() != rl_keep.reg.GetReg()) && (rl_free.reg.GetHighReg() != rl_keep.reg.GetHighReg())) {
// No overlap, free both
- FreeTemp(rl_free.low_reg);
- FreeTemp(rl_free.high_reg);
+ FreeTemp(rl_free.reg.GetReg());
+ FreeTemp(rl_free.reg.GetHighReg());
}
}
/*
@@ -697,19 +688,19 @@ void ArmMir2Lir::ClobberCallerSave() {
RegLocation ArmMir2Lir::GetReturnWideAlt() {
RegLocation res = LocCReturnWide();
- res.low_reg = r2;
- res.high_reg = r3;
+ res.reg.SetReg(r2);
+ res.reg.SetHighReg(r3);
Clobber(r2);
Clobber(r3);
MarkInUse(r2);
MarkInUse(r3);
- MarkPair(res.low_reg, res.high_reg);
+ MarkPair(res.reg.GetReg(), res.reg.GetHighReg());
return res;
}
RegLocation ArmMir2Lir::GetReturnAlt() {
RegLocation res = LocCReturn();
- res.low_reg = r1;
+ res.reg.SetReg(r1);
Clobber(r1);
MarkInUse(r1);
return res;
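
AllocTypedTempPair's old return convention packed two register numbers into a single int, which every caller then had to decode; RegStorage makes the pairing explicit and typed. The legacy decode, for comparison with the removed "(low_reg & 0xff) | ((high_reg & 0xff) << 8)" packing:

    // Old convention: low reg in the low byte, high reg in the next byte.
    inline int LowRegOfPair(int pair) { return pair & 0xff; }
    inline int HighRegOfPair(int pair) { return (pair >> 8) & 0xff; }
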
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 9d3968bff2..c2cfb4dbc1 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -499,12 +499,6 @@ LIR* ArmMir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src1, int value) {
else
opcode = (neg) ? kThumbAddRRI3 : kThumbSubRRI3;
return NewLIR3(opcode, r_dest, r_src1, abs_value);
- } else if ((abs_value & 0x3ff) == abs_value) {
- if (op == kOpAdd)
- opcode = (neg) ? kThumb2SubRRI12 : kThumb2AddRRI12;
- else
- opcode = (neg) ? kThumb2AddRRI12 : kThumb2SubRRI12;
- return NewLIR3(opcode, r_dest, r_src1, abs_value);
}
if (mod_imm < 0) {
mod_imm = ModifiedImmediate(-value);
@@ -512,6 +506,15 @@ LIR* ArmMir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src1, int value) {
op = (op == kOpAdd) ? kOpSub : kOpAdd;
}
}
+ if (mod_imm < 0 && (abs_value & 0x3ff) == abs_value) {
+ // Deliberately placed after the modified immediate check: the I12 encodings do not set
+ // flags, and for small values we sometimes rely on the flags being set even when the
+ // registers are not low regs.
+ if (op == kOpAdd)
+ opcode = (neg) ? kThumb2SubRRI12 : kThumb2AddRRI12;
+ else
+ opcode = (neg) ? kThumb2AddRRI12 : kThumb2SubRRI12;
+ return NewLIR3(opcode, r_dest, r_src1, abs_value);
+ }
if (op == kOpSub) {
opcode = kThumb2SubRRI8M;
alt_opcode = kThumb2SubRRR;
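
With the I12 block moved after the modified-immediate check, OpRegRegImm now tries encodings in order of usefulness: the 16-bit imm3 form (low regs only), then the modified-immediate 8M forms, and only then the 12-bit immediate, which does not set flags; judging by the DCHECKs on ENCODE_CCODE added elsewhere in this patch, the earlier forms do. A condensed model of the selection order, with the encodings abstracted:

    enum AddSubEncoding { kImm3, kImm8Modified, kImm12NoFlags, kNeedsTempReg };

    // Flag-setting encodings win whenever they can represent the value; the
    // I12 form is a last resort before materializing the constant in a register.
    AddSubEncoding PickAddSubEncoding(bool low_regs, int abs_value, bool has_mod_imm) {
      if (low_regs && (abs_value & 0x7) == abs_value) return kImm3;
      if (has_mod_imm) return kImm8Modified;
      if ((abs_value & 0x3ff) == abs_value) return kImm12NoFlags;
      return kNeedsTempReg;
    }
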
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 31854496ab..eb6f9d1f7e 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -78,11 +78,6 @@ void Mir2Lir::MarkSafepointPC(LIR* inst) {
DCHECK_EQ(safepoint_pc->u.m.def_mask, ENCODE_ALL);
}
-bool Mir2Lir::FastInstance(uint32_t field_idx, bool is_put, int* field_offset, bool* is_volatile) {
- return cu_->compiler_driver->ComputeInstanceFieldInfo(
- field_idx, mir_graph_->GetCurrentDexCompilationUnit(), is_put, field_offset, is_volatile);
-}
-
/* Remove a LIR from the list. */
void Mir2Lir::UnlinkLIR(LIR* lir) {
if (UNLIKELY(lir == first_lir_insn_)) {
@@ -379,21 +374,21 @@ LIR* Mir2Lir::AddWideData(LIR* *constant_list_p, int val_lo, int val_hi) {
return AddWordData(constant_list_p, val_lo);
}
-static void PushWord(std::vector<uint8_t>&buf, int data) {
+static void Push32(std::vector<uint8_t>&buf, int data) {
buf.push_back(data & 0xff);
buf.push_back((data >> 8) & 0xff);
buf.push_back((data >> 16) & 0xff);
buf.push_back((data >> 24) & 0xff);
}
-// Push 8 bytes on 64-bit systems; 4 on 32-bit systems.
-static void PushPointer(std::vector<uint8_t>&buf, void const* pointer) {
- uintptr_t data = reinterpret_cast<uintptr_t>(pointer);
- if (sizeof(void*) == sizeof(uint64_t)) {
- PushWord(buf, (data >> (sizeof(void*) * 4)) & 0xFFFFFFFF);
- PushWord(buf, data & 0xFFFFFFFF);
+// Push 8 bytes on 64-bit target systems; 4 on 32-bit target systems.
+static void PushPointer(std::vector<uint8_t>&buf, const void* pointer, bool target64) {
+ uint64_t data = reinterpret_cast<uintptr_t>(pointer);
+ if (target64) {
+ Push32(buf, data & 0xFFFFFFFF);
+ Push32(buf, (data >> 32) & 0xFFFFFFFF);
} else {
- PushWord(buf, data);
+ Push32(buf, static_cast<uint32_t>(data));
}
}
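
Push32 keeps PushWord's little-endian byte order, and the 64-bit PushPointer path now writes the low word first so the buffer matches little-endian target memory regardless of the host. A small hypothetical check of the intended layout, not part of the patch:

  #include <cassert>
  #include <cstdint>
  #include <vector>

  static void Push32(std::vector<uint8_t>& buf, uint32_t data) {
    buf.push_back(data & 0xff);
    buf.push_back((data >> 8) & 0xff);
    buf.push_back((data >> 16) & 0xff);
    buf.push_back((data >> 24) & 0xff);
  }

  int main() {
    std::vector<uint8_t> buf;
    Push32(buf, 0x11223344u);
    // Little-endian: least significant byte first.
    assert(buf[0] == 0x44 && buf[3] == 0x11);

    // 64-bit pointer layout after this change: low 32 bits, then high 32 bits.
    uint64_t data = 0x1122334455667788ull;
    Push32(buf, data & 0xFFFFFFFF);
    Push32(buf, (data >> 32) & 0xFFFFFFFF);
    assert(buf[4] == 0x88 && buf[8] == 0x44);
    return 0;
  }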
@@ -408,7 +403,7 @@ void Mir2Lir::InstallLiteralPools() {
AlignBuffer(code_buffer_, data_offset_);
LIR* data_lir = literal_list_;
while (data_lir != NULL) {
- PushWord(code_buffer_, data_lir->operands[0]);
+ Push32(code_buffer_, data_lir->operands[0]);
data_lir = NEXT_LIR(data_lir);
}
// Push code and method literals, record offsets for the compiler to patch.
@@ -424,7 +419,7 @@ void Mir2Lir::InstallLiteralPools() {
code_buffer_.size());
const DexFile::MethodId& id = cu_->dex_file->GetMethodId(target);
// unique value based on target to ensure code deduplication works
- PushPointer(code_buffer_, &id);
+ PushPointer(code_buffer_, &id, cu_->target64);
data_lir = NEXT_LIR(data_lir);
}
data_lir = method_literal_list_;
@@ -439,7 +434,7 @@ void Mir2Lir::InstallLiteralPools() {
code_buffer_.size());
const DexFile::MethodId& id = cu_->dex_file->GetMethodId(target);
// unique value based on target to ensure code deduplication works
- PushPointer(code_buffer_, &id);
+ PushPointer(code_buffer_, &id, cu_->target64);
data_lir = NEXT_LIR(data_lir);
}
// Push class literals.
@@ -453,7 +448,7 @@ void Mir2Lir::InstallLiteralPools() {
code_buffer_.size());
const DexFile::TypeId& id = cu_->dex_file->GetTypeId(target);
// unique value based on target to ensure code deduplication works
- PushPointer(code_buffer_, &id);
+ PushPointer(code_buffer_, &id, cu_->target64);
data_lir = NEXT_LIR(data_lir);
}
}
@@ -497,8 +492,8 @@ void Mir2Lir::InstallSwitchTables() {
<< std::hex << keys[elems] << ", disp: 0x"
<< std::hex << disp;
}
- PushWord(code_buffer_, keys[elems]);
- PushWord(code_buffer_,
+ Push32(code_buffer_, keys[elems]);
+ Push32(code_buffer_,
tab_rec->targets[elems]->offset - bx_offset);
}
} else {
@@ -510,7 +505,7 @@ void Mir2Lir::InstallSwitchTables() {
LOG(INFO) << " Case[" << elems << "] disp: 0x"
<< std::hex << disp;
}
- PushWord(code_buffer_, tab_rec->targets[elems]->offset - bx_offset);
+ Push32(code_buffer_, tab_rec->targets[elems]->offset - bx_offset);
}
}
}
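
For reference, the sparse-switch loop above serializes (key, displacement) pairs while the packed form stores displacements only; displacements are relative to the switch's branch instruction (bx_offset). A schematic of one sparse entry, with descriptive names not taken from the source:

  #include <cstdint>

  // Schematic of what InstallSwitchTables() appends to code_buffer_.
  // Sparse switch: elems entries of this pair; packed switch: the
  // displacement field only, with keys implicit in the index.
  struct SparseSwitchEntry {
    int32_t key;           // Case value, as in the Dex switch payload.
    int32_t displacement;  // tab_rec->targets[i]->offset - bx_offset.
  };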
@@ -1070,10 +1065,12 @@ CompiledMethod* Mir2Lir::GetCompiledMethod() {
DCHECK_EQ(fp_vmap_table_.size(), 0u);
vmap_encoder.PushBackUnsigned(0u); // Size is 0.
}
+
+ UniquePtr<std::vector<uint8_t> > cfi_info(ReturnCallFrameInformation());
CompiledMethod* result =
new CompiledMethod(*cu_->compiler_driver, cu_->instruction_set, code_buffer_, frame_size_,
core_spill_mask_, fp_spill_mask_, encoded_mapping_table_,
- vmap_encoder.GetData(), native_gc_map_);
+ vmap_encoder.GetData(), native_gc_map_, cfi_info.get());
return result;
}
@@ -1216,4 +1213,9 @@ void Mir2Lir::LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_re
AppendLIR(load_pc_rel);
}
+std::vector<uint8_t>* Mir2Lir::ReturnCallFrameInformation() {
+ // Default case is to do nothing.
+ return nullptr;
+}
+
} // namespace art
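
ReturnCallFrameInformation() is a new virtual hook: the default above returns nullptr, and GetCompiledMethod() forwards whatever a backend returns into the CompiledMethod. A hypothetical override shape, assuming DWARF call-frame opcodes as the payload; the real contents are target-specific and omitted:

  #include <cstdint>
  #include <vector>

  // Hypothetical backend override, mirroring the hook's contract: return
  // heap-allocated CFI bytes, or nullptr (the Mir2Lir default) for no CFI.
  // The caller in GetCompiledMethod() takes ownership via UniquePtr.
  std::vector<uint8_t>* BuildCallFrameInformation() {
    std::vector<uint8_t>* cfi_info = new std::vector<uint8_t>;
    // Append DW_CFA_* opcodes describing the prologue/epilogue here.
    cfi_info->push_back(0x00);  // DW_CFA_nop, as a stand-in.
    return cfi_info;
  }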
diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h
index 3dcb964fab..b4d8dd6009 100644
--- a/compiler/dex/quick/dex_file_method_inliner.h
+++ b/compiler/dex/quick/dex_file_method_inliner.h
@@ -31,7 +31,7 @@ namespace verifier {
class MethodVerifier;
} // namespace verifier
-class CallInfo;
+struct CallInfo;
class Mir2Lir;
enum InlineMethodOpcode : uint16_t {
@@ -61,6 +61,7 @@ enum InlineMethodOpcode : uint16_t {
kInlineOpIGet,
kInlineOpIPut,
};
+std::ostream& operator<<(std::ostream& os, const InlineMethodOpcode& rhs);
enum InlineMethodFlags : uint16_t {
kNoInlineMethodFlags = 0x0000,
@@ -78,13 +79,13 @@ enum IntrinsicFlags {
// kIntrinsicIsEmptyOrLength
kIntrinsicFlagLength = kIntrinsicFlagNone,
- kIntrinsicFlagIsEmpty = 1,
+ kIntrinsicFlagIsEmpty = kIntrinsicFlagMin,
// kIntrinsicIndexOf
- kIntrinsicFlagBase0 = 1,
+ kIntrinsicFlagBase0 = kIntrinsicFlagMin,
// kIntrinsicUnsafeGet, kIntrinsicUnsafePut, kIntrinsicUnsafeCas
- kIntrinsicFlagIsLong = 1,
+ kIntrinsicFlagIsLong = kIntrinsicFlagMin,
// kIntrinsicUnsafeGet, kIntrinsicUnsafePut
kIntrinsicFlagIsVolatile = 2,
// kIntrinsicUnsafePut, kIntrinsicUnsafeCas
@@ -187,7 +188,6 @@ class DexFileMethodInliner {
*/
bool GenSpecial(Mir2Lir* backend, uint32_t method_idx);
- private:
/**
* To avoid multiple lookups of a class by its descriptor, we cache its
* type index in the IndexCache. These are the indexes into the IndexCache
@@ -311,6 +311,7 @@ class DexFileMethodInliner {
kProtoCacheLast
};
+ private:
/**
* The maximum number of method parameters we support in the ProtoDef.
*/
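
Rebasing the per-intrinsic flags on kIntrinsicFlagMin instead of a literal 1 keeps them anchored to wherever the shared flags end. A reduced sketch of the pattern; the concrete values here are illustrative:

  // Reduced sketch: shared flags occupy the low bits, and each intrinsic's
  // private flags start at kIntrinsicFlagMin instead of a hard-coded 1.
  enum IntrinsicFlags {
    kIntrinsicFlagNone = 0,
    kIntrinsicFlagMin  = 1,

    // kIntrinsicIsEmptyOrLength
    kIntrinsicFlagLength  = kIntrinsicFlagNone,
    kIntrinsicFlagIsEmpty = kIntrinsicFlagMin,

    // kIntrinsicUnsafeGet / Put / Cas
    kIntrinsicFlagIsLong     = kIntrinsicFlagMin,
    kIntrinsicFlagIsVolatile = kIntrinsicFlagMin << 1,
  };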
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 0533fbfcd7..00c51d40d3 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -128,12 +128,12 @@ void Mir2Lir::GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
if ((rl_temp.location == kLocDalvikFrame) &&
InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src2))) {
// OK - convert this to a compare immediate and branch
- OpCmpImmBranch(cond, rl_src1.low_reg, mir_graph_->ConstantValue(rl_src2), taken);
+ OpCmpImmBranch(cond, rl_src1.reg.GetReg(), mir_graph_->ConstantValue(rl_src2), taken);
return;
}
}
rl_src2 = LoadValue(rl_src2, kCoreReg);
- OpCmpBranch(cond, rl_src1.low_reg, rl_src2.low_reg, taken);
+ OpCmpBranch(cond, rl_src1.reg.GetReg(), rl_src2.reg.GetReg(), taken);
}
void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken,
@@ -163,17 +163,17 @@ void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_s
cond = static_cast<ConditionCode>(0);
LOG(FATAL) << "Unexpected opcode " << opcode;
}
- OpCmpImmBranch(cond, rl_src.low_reg, 0, taken);
+ OpCmpImmBranch(cond, rl_src.reg.GetReg(), 0, taken);
}
void Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (rl_src.location == kLocPhysReg) {
- OpRegCopy(rl_result.low_reg, rl_src.low_reg);
+ OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetReg());
} else {
- LoadValueDirect(rl_src, rl_result.low_reg);
+ LoadValueDirect(rl_src, rl_result.reg.GetReg());
}
- OpRegRegImm(kOpAsr, rl_result.high_reg, rl_result.low_reg, 31);
+ OpRegRegImm(kOpAsr, rl_result.reg.GetHighReg(), rl_result.reg.GetReg(), 31);
StoreValueWide(rl_dest, rl_result);
}
@@ -195,7 +195,7 @@ void Mir2Lir::GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
default:
LOG(ERROR) << "Bad int conversion type";
}
- OpRegReg(op, rl_result.low_reg, rl_src.low_reg);
+ OpRegReg(op, rl_result.reg.GetReg(), rl_src.reg.GetReg());
StoreValue(rl_dest, rl_result);
}
@@ -290,7 +290,7 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) {
RegLocation loc = UpdateLoc(info->args[i]);
if (loc.location == kLocPhysReg) {
StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low),
- loc.low_reg, kWord);
+ loc.reg.GetReg(), kWord);
}
}
/*
@@ -341,10 +341,10 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) {
RegLocation rl_arg = LoadValue(info->args[i], kCoreReg);
StoreBaseDisp(TargetReg(kRet0),
mirror::Array::DataOffset(component_size).Int32Value() +
- i * 4, rl_arg.low_reg, kWord);
+ i * 4, rl_arg.reg.GetReg(), kWord);
// If the LoadValue caused a temp to be allocated, free it
- if (IsTemp(rl_arg.low_reg)) {
- FreeTemp(rl_arg.low_reg);
+ if (IsTemp(rl_arg.reg.GetReg())) {
+ FreeTemp(rl_arg.reg.GetReg());
}
}
}
@@ -381,33 +381,27 @@ class StaticFieldSlowPath : public Mir2Lir::LIRSlowPath {
const int r_base_;
};
-void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_double,
+void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double,
bool is_object) {
- int field_offset;
- int storage_index;
- bool is_volatile;
- bool is_referrers_class;
- bool is_initialized;
- bool fast_path = cu_->compiler_driver->ComputeStaticFieldInfo(
- field_idx, mir_graph_->GetCurrentDexCompilationUnit(), true,
- &field_offset, &storage_index, &is_referrers_class, &is_volatile, &is_initialized);
- if (fast_path && !SLOW_FIELD_PATH) {
- DCHECK_GE(field_offset, 0);
+ const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
+ cu_->compiler_driver->ProcessedStaticField(field_info.FastPut(), field_info.IsReferrersClass());
+ if (field_info.FastPut() && !SLOW_FIELD_PATH) {
+ DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
int r_base;
- if (is_referrers_class) {
+ if (field_info.IsReferrersClass()) {
// Fast path, static storage base is this method's class
RegLocation rl_method = LoadCurrMethod();
r_base = AllocTemp();
- LoadWordDisp(rl_method.low_reg,
+ LoadWordDisp(rl_method.reg.GetReg(),
mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
- if (IsTemp(rl_method.low_reg)) {
- FreeTemp(rl_method.low_reg);
+ if (IsTemp(rl_method.reg.GetReg())) {
+ FreeTemp(rl_method.reg.GetReg());
}
} else {
// Medium path, static storage base in a different class which requires checks that the other
// class is initialized.
// TODO: remove initialized check now that we are initializing classes in the compiler driver.
- DCHECK_GE(storage_index, 0);
+ DCHECK_NE(field_info.StorageIndex(), DexFile::kDexNoIndex);
// May do runtime call so everything to home locations.
FlushAllRegs();
// Using fixed register to sync with possible call to runtime support.
@@ -420,9 +414,9 @@ void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_do
mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
r_base);
LoadWordDisp(r_base, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
- sizeof(int32_t*) * storage_index, r_base);
+ sizeof(int32_t*) * field_info.StorageIndex(), r_base);
// r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
- if (!is_initialized) {
+ if (!field_info.IsInitialized()) {
// Check if r_base is NULL or a not yet initialized class.
// The slow path is invoked if the r_base is NULL or the class pointed
@@ -437,7 +431,7 @@ void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_do
AddSlowPath(new (arena_) StaticFieldSlowPath(this,
unresolved_branch, uninit_branch, cont,
- storage_index, r_base));
+ field_info.StorageIndex(), r_base));
FreeTemp(r_tmp);
}
@@ -449,20 +443,20 @@ void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_do
} else {
rl_src = LoadValue(rl_src, kAnyReg);
}
- if (is_volatile) {
+ if (field_info.IsVolatile()) {
GenMemBarrier(kStoreStore);
}
if (is_long_or_double) {
- StoreBaseDispWide(r_base, field_offset, rl_src.low_reg,
- rl_src.high_reg);
+ StoreBaseDispWide(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg.GetReg(),
+ rl_src.reg.GetHighReg());
} else {
- StoreWordDisp(r_base, field_offset, rl_src.low_reg);
+ StoreWordDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg.GetReg());
}
- if (is_volatile) {
+ if (field_info.IsVolatile()) {
GenMemBarrier(kStoreLoad);
}
if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
- MarkGCCard(rl_src.low_reg, r_base);
+ MarkGCCard(rl_src.reg.GetReg(), r_base);
}
FreeTemp(r_base);
} else {
@@ -471,33 +465,27 @@ void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_do
is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Static)
: (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjStatic)
: QUICK_ENTRYPOINT_OFFSET(pSet32Static));
- CallRuntimeHelperImmRegLocation(setter_offset, field_idx, rl_src, true);
+ CallRuntimeHelperImmRegLocation(setter_offset, field_info.FieldIndex(), rl_src, true);
}
}
-void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest,
+void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest,
bool is_long_or_double, bool is_object) {
- int field_offset;
- int storage_index;
- bool is_volatile;
- bool is_referrers_class;
- bool is_initialized;
- bool fast_path = cu_->compiler_driver->ComputeStaticFieldInfo(
- field_idx, mir_graph_->GetCurrentDexCompilationUnit(), false,
- &field_offset, &storage_index, &is_referrers_class, &is_volatile, &is_initialized);
- if (fast_path && !SLOW_FIELD_PATH) {
- DCHECK_GE(field_offset, 0);
+ const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
+ cu_->compiler_driver->ProcessedStaticField(field_info.FastGet(), field_info.IsReferrersClass());
+ if (field_info.FastGet() && !SLOW_FIELD_PATH) {
+ DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
int r_base;
- if (is_referrers_class) {
+ if (field_info.IsReferrersClass()) {
// Fast path, static storage base is this method's class
RegLocation rl_method = LoadCurrMethod();
r_base = AllocTemp();
- LoadWordDisp(rl_method.low_reg,
+ LoadWordDisp(rl_method.reg.GetReg(),
mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
} else {
// Medium path, static storage base in a different class which requires checks that the other
// class is initialized
- DCHECK_GE(storage_index, 0);
+ DCHECK_NE(field_info.StorageIndex(), DexFile::kDexNoIndex);
// May do runtime call so everything to home locations.
FlushAllRegs();
// Using fixed register to sync with possible call to runtime support.
@@ -510,9 +498,9 @@ void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest,
mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
r_base);
LoadWordDisp(r_base, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
- sizeof(int32_t*) * storage_index, r_base);
+ sizeof(int32_t*) * field_info.StorageIndex(), r_base);
// r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
- if (!is_initialized) {
+ if (!field_info.IsInitialized()) {
// Check if r_base is NULL or a not yet initialized class.
// The slow path is invoked if the r_base is NULL or the class pointed
@@ -527,7 +515,7 @@ void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest,
AddSlowPath(new (arena_) StaticFieldSlowPath(this,
unresolved_branch, uninit_branch, cont,
- storage_index, r_base));
+ field_info.StorageIndex(), r_base));
FreeTemp(r_tmp);
}
@@ -535,14 +523,14 @@ void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest,
}
// r_base now holds static storage base
RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
- if (is_volatile) {
+ if (field_info.IsVolatile()) {
GenMemBarrier(kLoadLoad);
}
if (is_long_or_double) {
- LoadBaseDispWide(r_base, field_offset, rl_result.low_reg,
- rl_result.high_reg, INVALID_SREG);
+ LoadBaseDispWide(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg.GetReg(),
+ rl_result.reg.GetHighReg(), INVALID_SREG);
} else {
- LoadWordDisp(r_base, field_offset, rl_result.low_reg);
+ LoadWordDisp(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg.GetReg());
}
FreeTemp(r_base);
if (is_long_or_double) {
@@ -556,7 +544,7 @@ void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest,
is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Static)
: (is_object ? QUICK_ENTRYPOINT_OFFSET(pGetObjStatic)
: QUICK_ENTRYPOINT_OFFSET(pGet32Static));
- CallRuntimeHelperImm(getterOffset, field_idx, true);
+ CallRuntimeHelperImm(getterOffset, field_info.FieldIndex(), true);
if (is_long_or_double) {
RegLocation rl_result = GetReturnWide(rl_dest.fp);
StoreValueWide(rl_dest, rl_result);
@@ -698,36 +686,34 @@ void Mir2Lir::HandleThrowLaunchPads() {
}
}
-void Mir2Lir::GenIGet(uint32_t field_idx, int opt_flags, OpSize size,
+void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size,
RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double,
bool is_object) {
- int field_offset;
- bool is_volatile;
-
- bool fast_path = FastInstance(field_idx, false, &field_offset, &is_volatile);
-
- if (fast_path && !SLOW_FIELD_PATH) {
+ const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
+ cu_->compiler_driver->ProcessedInstanceField(field_info.FastGet());
+ if (field_info.FastGet() && !SLOW_FIELD_PATH) {
RegLocation rl_result;
RegisterClass reg_class = oat_reg_class_by_size(size);
- DCHECK_GE(field_offset, 0);
+ DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
rl_obj = LoadValue(rl_obj, kCoreReg);
if (is_long_or_double) {
DCHECK(rl_dest.wide);
- GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
+ GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), opt_flags);
if (cu_->instruction_set == kX86) {
rl_result = EvalLoc(rl_dest, reg_class, true);
- GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
- LoadBaseDispWide(rl_obj.low_reg, field_offset, rl_result.low_reg,
- rl_result.high_reg, rl_obj.s_reg_low);
- if (is_volatile) {
+ GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), opt_flags);
+ LoadBaseDispWide(rl_obj.reg.GetReg(), field_info.FieldOffset().Int32Value(),
+ rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), rl_obj.s_reg_low);
+ if (field_info.IsVolatile()) {
GenMemBarrier(kLoadLoad);
}
} else {
int reg_ptr = AllocTemp();
- OpRegRegImm(kOpAdd, reg_ptr, rl_obj.low_reg, field_offset);
+ OpRegRegImm(kOpAdd, reg_ptr, rl_obj.reg.GetReg(), field_info.FieldOffset().Int32Value());
rl_result = EvalLoc(rl_dest, reg_class, true);
- LoadBaseDispWide(reg_ptr, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
- if (is_volatile) {
+ LoadBaseDispWide(reg_ptr, 0, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(),
+ INVALID_SREG);
+ if (field_info.IsVolatile()) {
GenMemBarrier(kLoadLoad);
}
FreeTemp(reg_ptr);
@@ -735,10 +721,10 @@ void Mir2Lir::GenIGet(uint32_t field_idx, int opt_flags, OpSize size,
StoreValueWide(rl_dest, rl_result);
} else {
rl_result = EvalLoc(rl_dest, reg_class, true);
- GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
- LoadBaseDisp(rl_obj.low_reg, field_offset, rl_result.low_reg,
- kWord, rl_obj.s_reg_low);
- if (is_volatile) {
+ GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), opt_flags);
+ LoadBaseDisp(rl_obj.reg.GetReg(), field_info.FieldOffset().Int32Value(),
+ rl_result.reg.GetReg(), kWord, rl_obj.s_reg_low);
+ if (field_info.IsVolatile()) {
GenMemBarrier(kLoadLoad);
}
StoreValue(rl_dest, rl_result);
@@ -748,7 +734,7 @@ void Mir2Lir::GenIGet(uint32_t field_idx, int opt_flags, OpSize size,
is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Instance)
: (is_object ? QUICK_ENTRYPOINT_OFFSET(pGetObjInstance)
: QUICK_ENTRYPOINT_OFFSET(pGet32Instance));
- CallRuntimeHelperImmRegLocation(getterOffset, field_idx, rl_obj, true);
+ CallRuntimeHelperImmRegLocation(getterOffset, field_info.FieldIndex(), rl_obj, true);
if (is_long_or_double) {
RegLocation rl_result = GetReturnWide(rl_dest.fp);
StoreValueWide(rl_dest, rl_result);
@@ -759,43 +745,42 @@ void Mir2Lir::GenIGet(uint32_t field_idx, int opt_flags, OpSize size,
}
}
-void Mir2Lir::GenIPut(uint32_t field_idx, int opt_flags, OpSize size,
+void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size,
RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double,
bool is_object) {
- int field_offset;
- bool is_volatile;
-
- bool fast_path = FastInstance(field_idx, true, &field_offset, &is_volatile);
- if (fast_path && !SLOW_FIELD_PATH) {
+ const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
+ cu_->compiler_driver->ProcessedInstanceField(field_info.FastPut());
+ if (field_info.FastPut() && !SLOW_FIELD_PATH) {
RegisterClass reg_class = oat_reg_class_by_size(size);
- DCHECK_GE(field_offset, 0);
+ DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
rl_obj = LoadValue(rl_obj, kCoreReg);
if (is_long_or_double) {
int reg_ptr;
rl_src = LoadValueWide(rl_src, kAnyReg);
- GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
+ GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), opt_flags);
reg_ptr = AllocTemp();
- OpRegRegImm(kOpAdd, reg_ptr, rl_obj.low_reg, field_offset);
- if (is_volatile) {
+ OpRegRegImm(kOpAdd, reg_ptr, rl_obj.reg.GetReg(), field_info.FieldOffset().Int32Value());
+ if (field_info.IsVolatile()) {
GenMemBarrier(kStoreStore);
}
- StoreBaseDispWide(reg_ptr, 0, rl_src.low_reg, rl_src.high_reg);
- if (is_volatile) {
+ StoreBaseDispWide(reg_ptr, 0, rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
+ if (field_info.IsVolatile()) {
GenMemBarrier(kLoadLoad);
}
FreeTemp(reg_ptr);
} else {
rl_src = LoadValue(rl_src, reg_class);
- GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
- if (is_volatile) {
+ GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), opt_flags);
+ if (field_info.IsVolatile()) {
GenMemBarrier(kStoreStore);
}
- StoreBaseDisp(rl_obj.low_reg, field_offset, rl_src.low_reg, kWord);
- if (is_volatile) {
+ StoreBaseDisp(rl_obj.reg.GetReg(), field_info.FieldOffset().Int32Value(),
+ rl_src.reg.GetReg(), kWord);
+ if (field_info.IsVolatile()) {
GenMemBarrier(kLoadLoad);
}
if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
- MarkGCCard(rl_src.low_reg, rl_obj.low_reg);
+ MarkGCCard(rl_src.reg.GetReg(), rl_obj.reg.GetReg());
}
}
} else {
@@ -803,7 +788,8 @@ void Mir2Lir::GenIPut(uint32_t field_idx, int opt_flags, OpSize size,
is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Instance)
: (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjInstance)
: QUICK_ENTRYPOINT_OFFSET(pSet32Instance));
- CallRuntimeHelperImmRegLocationRegLocation(setter_offset, field_idx, rl_obj, rl_src, true);
+ CallRuntimeHelperImmRegLocationRegLocation(setter_offset, field_info.FieldIndex(),
+ rl_obj, rl_src, true);
}
}
@@ -829,23 +815,23 @@ void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) {
// Call out to helper which resolves type and verifies access.
// Resolved type returned in kRet0.
CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccess),
- type_idx, rl_method.low_reg, true);
+ type_idx, rl_method.reg.GetReg(), true);
RegLocation rl_result = GetReturn(false);
StoreValue(rl_dest, rl_result);
} else {
// We don't need access checks; load the type from the dex cache
int32_t dex_cache_offset =
mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value();
- LoadWordDisp(rl_method.low_reg, dex_cache_offset, res_reg);
+ LoadWordDisp(rl_method.reg.GetReg(), dex_cache_offset, res_reg);
int32_t offset_of_type =
mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*)
* type_idx);
- LoadWordDisp(res_reg, offset_of_type, rl_result.low_reg);
+ LoadWordDisp(res_reg, offset_of_type, rl_result.reg.GetReg());
if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file,
type_idx) || SLOW_TYPE_PATH) {
// Slow path: at runtime, test if the type is null and if so initialize
FlushAllRegs();
- LIR* branch = OpCmpImmBranch(kCondEq, rl_result.low_reg, 0, NULL);
+ LIR* branch = OpCmpImmBranch(kCondEq, rl_result.reg.GetReg(), 0, NULL);
LIR* cont = NewLIR0(kPseudoTargetLabel);
// Object to generate the slow path for class resolution.
@@ -861,8 +847,8 @@ void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) {
GenerateTargetLabel();
m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeType), type_idx_,
- rl_method_.low_reg, true);
- m2l_->OpRegCopy(rl_result_.low_reg, m2l_->TargetReg(kRet0));
+ rl_method_.reg.GetReg(), true);
+ m2l_->OpRegCopy(rl_result_.reg.GetReg(), m2l_->TargetReg(kRet0));
m2l_->OpUnconditionalBranch(cont_);
}
@@ -900,8 +886,8 @@ void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) {
int r_method;
if (rl_method.location == kLocPhysReg) {
// A temp would conflict with register use below.
- DCHECK(!IsTemp(rl_method.low_reg));
- r_method = rl_method.low_reg;
+ DCHECK(!IsTemp(rl_method.reg.GetReg()));
+ r_method = rl_method.reg.GetReg();
} else {
r_method = TargetReg(kArg2);
LoadCurrMethodDirect(r_method);
@@ -960,9 +946,9 @@ void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) {
RegLocation rl_method = LoadCurrMethod();
int res_reg = AllocTemp();
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- LoadWordDisp(rl_method.low_reg,
+ LoadWordDisp(rl_method.reg.GetReg(),
mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), res_reg);
- LoadWordDisp(res_reg, offset_of_string, rl_result.low_reg);
+ LoadWordDisp(res_reg, offset_of_string, rl_result.reg.GetReg());
StoreValue(rl_dest, rl_result);
}
}
@@ -1035,12 +1021,12 @@ void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, Re
RegLocation object = LoadValue(rl_src, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- int result_reg = rl_result.low_reg;
- if (result_reg == object.low_reg) {
+ int result_reg = rl_result.reg.GetReg();
+ if (result_reg == object.reg.GetReg()) {
result_reg = AllocTypedTemp(false, kCoreReg);
}
LoadConstant(result_reg, 0); // assume false
- LIR* null_branchover = OpCmpImmBranch(kCondEq, object.low_reg, 0, NULL);
+ LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg.GetReg(), 0, NULL);
int check_class = AllocTypedTemp(false, kCoreReg);
int object_class = AllocTypedTemp(false, kCoreReg);
@@ -1049,11 +1035,11 @@ void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, Re
if (use_declaring_class) {
LoadWordDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
check_class);
- LoadWordDisp(object.low_reg, mirror::Object::ClassOffset().Int32Value(), object_class);
+ LoadWordDisp(object.reg.GetReg(), mirror::Object::ClassOffset().Int32Value(), object_class);
} else {
LoadWordDisp(check_class, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
check_class);
- LoadWordDisp(object.low_reg, mirror::Object::ClassOffset().Int32Value(), object_class);
+ LoadWordDisp(object.reg.GetReg(), mirror::Object::ClassOffset().Int32Value(), object_class);
int32_t offset_of_type =
mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
(sizeof(mirror::Class*) * type_idx);
@@ -1077,7 +1063,7 @@ void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, Re
FreeTemp(object_class);
FreeTemp(check_class);
if (IsTemp(result_reg)) {
- OpRegCopy(rl_result.low_reg, result_reg);
+ OpRegCopy(rl_result.reg.GetReg(), result_reg);
FreeTemp(result_reg);
}
StoreValue(rl_dest, rl_result);
@@ -1133,7 +1119,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
RegLocation rl_result = GetReturn(false);
if (cu_->instruction_set == kMips) {
// On MIPS rArg0 != rl_result, place false in result if branch is taken.
- LoadConstant(rl_result.low_reg, 0);
+ LoadConstant(rl_result.reg.GetReg(), 0);
}
LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
@@ -1147,12 +1133,12 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
if (cu_->instruction_set == kThumb2) {
OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2)); // Same?
OpIT(kCondEq, "E"); // if-convert the test
- LoadConstant(rl_result.low_reg, 1); // .eq case - load true
- LoadConstant(rl_result.low_reg, 0); // .ne case - load false
+ LoadConstant(rl_result.reg.GetReg(), 1); // .eq case - load true
+ LoadConstant(rl_result.reg.GetReg(), 0); // .ne case - load false
} else {
- LoadConstant(rl_result.low_reg, 0); // ne case - load false
+ LoadConstant(rl_result.reg.GetReg(), 0); // ne case - load false
branchover = OpCmpBranch(kCondNe, TargetReg(kArg1), TargetReg(kArg2), NULL);
- LoadConstant(rl_result.low_reg, 1); // eq case - load true
+ LoadConstant(rl_result.reg.GetReg(), 1); // eq case - load true
}
} else {
if (cu_->instruction_set == kThumb2) {
@@ -1169,7 +1155,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
} else {
if (!type_known_abstract) {
/* Uses branchovers */
- LoadConstant(rl_result.low_reg, 1); // assume true
+ LoadConstant(rl_result.reg.GetReg(), 1); // assume true
branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
}
int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivial));
@@ -1355,16 +1341,16 @@ void Mir2Lir::GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_des
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
// The longs may overlap - use intermediate temp if so
- if ((rl_result.low_reg == rl_src1.high_reg) || (rl_result.low_reg == rl_src2.high_reg)) {
+ if ((rl_result.reg.GetReg() == rl_src1.reg.GetHighReg()) || (rl_result.reg.GetReg() == rl_src2.reg.GetHighReg())) {
int t_reg = AllocTemp();
- OpRegRegReg(first_op, t_reg, rl_src1.low_reg, rl_src2.low_reg);
- OpRegRegReg(second_op, rl_result.high_reg, rl_src1.high_reg, rl_src2.high_reg);
- OpRegCopy(rl_result.low_reg, t_reg);
+ OpRegRegReg(first_op, t_reg, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+ OpRegRegReg(second_op, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
+ OpRegCopy(rl_result.reg.GetReg(), t_reg);
FreeTemp(t_reg);
} else {
- OpRegRegReg(first_op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
- OpRegRegReg(second_op, rl_result.high_reg, rl_src1.high_reg,
- rl_src2.high_reg);
+ OpRegRegReg(first_op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+ OpRegRegReg(second_op, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(),
+ rl_src2.reg.GetHighReg());
}
/*
* NOTE: If rl_dest refers to a frame variable in a large frame, the
@@ -1487,22 +1473,22 @@ void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
if (unary) {
rl_src1 = LoadValue(rl_src1, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegReg(op, rl_result.low_reg, rl_src1.low_reg);
+ OpRegReg(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg());
} else {
if (shift_op) {
int t_reg = INVALID_REG;
rl_src2 = LoadValue(rl_src2, kCoreReg);
t_reg = AllocTemp();
- OpRegRegImm(kOpAnd, t_reg, rl_src2.low_reg, 31);
+ OpRegRegImm(kOpAnd, t_reg, rl_src2.reg.GetReg(), 31);
rl_src1 = LoadValue(rl_src1, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegReg(op, rl_result.low_reg, rl_src1.low_reg, t_reg);
+ OpRegRegReg(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), t_reg);
FreeTemp(t_reg);
} else {
rl_src1 = LoadValue(rl_src1, kCoreReg);
rl_src2 = LoadValue(rl_src2, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegReg(op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
+ OpRegRegReg(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
}
}
StoreValue(rl_dest, rl_result);
@@ -1512,9 +1498,9 @@ void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
rl_src1 = LoadValue(rl_src1, kCoreReg);
rl_src2 = LoadValue(rl_src2, kCoreReg);
if (check_zero) {
- GenImmedCheck(kCondEq, rl_src2.low_reg, 0, kThrowDivZero);
+ GenImmedCheck(kCondEq, rl_src2.reg.GetReg(), 0, kThrowDivZero);
}
- rl_result = GenDivRem(rl_dest, rl_src1.low_reg, rl_src2.low_reg, op == kOpDiv);
+ rl_result = GenDivRem(rl_dest, rl_src1.reg.GetReg(), rl_src2.reg.GetReg(), op == kOpDiv);
done = true;
} else if (cu_->instruction_set == kThumb2) {
if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) {
@@ -1523,9 +1509,9 @@ void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
rl_src1 = LoadValue(rl_src1, kCoreReg);
rl_src2 = LoadValue(rl_src2, kCoreReg);
if (check_zero) {
- GenImmedCheck(kCondEq, rl_src2.low_reg, 0, kThrowDivZero);
+ GenImmedCheck(kCondEq, rl_src2.reg.GetReg(), 0, kThrowDivZero);
}
- rl_result = GenDivRem(rl_dest, rl_src1.low_reg, rl_src2.low_reg, op == kOpDiv);
+ rl_result = GenDivRem(rl_dest, rl_src1.reg.GetReg(), rl_src2.reg.GetReg(), op == kOpDiv);
done = true;
}
}
@@ -1585,29 +1571,29 @@ bool Mir2Lir::HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
int t_reg = AllocTemp();
if (lit == 2) {
// Division by 2 is by far the most common division by constant.
- OpRegRegImm(kOpLsr, t_reg, rl_src.low_reg, 32 - k);
- OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.low_reg);
- OpRegRegImm(kOpAsr, rl_result.low_reg, t_reg, k);
+ OpRegRegImm(kOpLsr, t_reg, rl_src.reg.GetReg(), 32 - k);
+ OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg.GetReg());
+ OpRegRegImm(kOpAsr, rl_result.reg.GetReg(), t_reg, k);
} else {
- OpRegRegImm(kOpAsr, t_reg, rl_src.low_reg, 31);
+ OpRegRegImm(kOpAsr, t_reg, rl_src.reg.GetReg(), 31);
OpRegRegImm(kOpLsr, t_reg, t_reg, 32 - k);
- OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.low_reg);
- OpRegRegImm(kOpAsr, rl_result.low_reg, t_reg, k);
+ OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg.GetReg());
+ OpRegRegImm(kOpAsr, rl_result.reg.GetReg(), t_reg, k);
}
} else {
int t_reg1 = AllocTemp();
int t_reg2 = AllocTemp();
if (lit == 2) {
- OpRegRegImm(kOpLsr, t_reg1, rl_src.low_reg, 32 - k);
- OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.low_reg);
+ OpRegRegImm(kOpLsr, t_reg1, rl_src.reg.GetReg(), 32 - k);
+ OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg.GetReg());
OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
- OpRegRegReg(kOpSub, rl_result.low_reg, t_reg2, t_reg1);
+ OpRegRegReg(kOpSub, rl_result.reg.GetReg(), t_reg2, t_reg1);
} else {
- OpRegRegImm(kOpAsr, t_reg1, rl_src.low_reg, 31);
+ OpRegRegImm(kOpAsr, t_reg1, rl_src.reg.GetReg(), 31);
OpRegRegImm(kOpLsr, t_reg1, t_reg1, 32 - k);
- OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.low_reg);
+ OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg.GetReg());
OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
- OpRegRegReg(kOpSub, rl_result.low_reg, t_reg2, t_reg1);
+ OpRegRegReg(kOpSub, rl_result.reg.GetReg(), t_reg2, t_reg1);
}
}
StoreValue(rl_dest, rl_result);
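
Both branches of HandleEasyDivRem above implement signed division by a power of two: logically shift the sign word right by 32 - k to get a k-bit bias, add it, then arithmetic-shift by k so negative dividends round toward zero (the lit == 2 case is simply k = 1). The same transform in plain C++, as a hypothetical helper:

  #include <cassert>
  #include <cstdint>

  // x / (1 << k) for signed x and k >= 1, rounding toward zero, no divide:
  // add the sign bits that would be shifted out, then arithmetic-shift.
  int32_t DivPow2(int32_t x, int k) {
    int32_t sign = x >> 31;                       // 0 or -1 (kOpAsr by 31).
    uint32_t bias = uint32_t(sign) >> (32 - k);   // kOpLsr: low k bits of sign.
    return (x + int32_t(bias)) >> k;              // kOpAsr by k.
  }

  int main() {
    assert(DivPow2(-7, 1) == -3);  // Matches C++'s truncating -7 / 2.
    assert(DivPow2(7, 2) == 1);
    return 0;
  }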
@@ -1637,7 +1623,7 @@ bool Mir2Lir::HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int li
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (power_of_two) {
// Shift.
- OpRegRegImm(kOpLsl, rl_result.low_reg, rl_src.low_reg, LowestSetBit(lit));
+ OpRegRegImm(kOpLsl, rl_result.reg.GetReg(), rl_src.reg.GetReg(), LowestSetBit(lit));
} else if (pop_count_le2) {
// Shift and add and shift.
int first_bit = LowestSetBit(lit);
@@ -1648,8 +1634,8 @@ bool Mir2Lir::HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int li
DCHECK(power_of_two_minus_one);
// TUNING: rsb dst, src, src lsl#LowestSetBit(lit + 1)
int t_reg = AllocTemp();
- OpRegRegImm(kOpLsl, t_reg, rl_src.low_reg, LowestSetBit(lit + 1));
- OpRegRegReg(kOpSub, rl_result.low_reg, t_reg, rl_src.low_reg);
+ OpRegRegImm(kOpLsl, t_reg, rl_src.reg.GetReg(), LowestSetBit(lit + 1));
+ OpRegRegReg(kOpSub, rl_result.reg.GetReg(), t_reg, rl_src.reg.GetReg());
}
StoreValue(rl_dest, rl_result);
return true;
@@ -1668,10 +1654,10 @@ void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, Re
rl_src = LoadValue(rl_src, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (cu_->instruction_set == kThumb2) {
- OpRegRegImm(kOpRsub, rl_result.low_reg, rl_src.low_reg, lit);
+ OpRegRegImm(kOpRsub, rl_result.reg.GetReg(), rl_src.reg.GetReg(), lit);
} else {
- OpRegReg(kOpNeg, rl_result.low_reg, rl_src.low_reg);
- OpRegImm(kOpAdd, rl_result.low_reg, lit);
+ OpRegReg(kOpNeg, rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ OpRegImm(kOpAdd, rl_result.reg.GetReg(), lit);
}
StoreValue(rl_dest, rl_result);
return;
@@ -1764,7 +1750,7 @@ void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, Re
bool done = false;
if (cu_->instruction_set == kMips) {
rl_src = LoadValue(rl_src, kCoreReg);
- rl_result = GenDivRemLit(rl_dest, rl_src.low_reg, lit, is_div);
+ rl_result = GenDivRemLit(rl_dest, rl_src.reg.GetReg(), lit, is_div);
done = true;
} else if (cu_->instruction_set == kX86) {
rl_result = GenDivRemLit(rl_dest, rl_src, lit, is_div);
@@ -1774,7 +1760,7 @@ void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, Re
// Use ARM SDIV instruction for division. For remainder we also need to
// calculate using a MUL and subtract.
rl_src = LoadValue(rl_src, kCoreReg);
- rl_result = GenDivRemLit(rl_dest, rl_src.low_reg, lit, is_div);
+ rl_result = GenDivRemLit(rl_dest, rl_src.reg.GetReg(), lit, is_div);
done = true;
}
}
@@ -1800,9 +1786,9 @@ void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, Re
rl_result = EvalLoc(rl_dest, kCoreReg, true);
// Avoid shifts by literal 0 - no support in Thumb. Change to copy.
if (shift_op && (lit == 0)) {
- OpRegCopy(rl_result.low_reg, rl_src.low_reg);
+ OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetReg());
} else {
- OpRegRegImm(op, rl_result.low_reg, rl_src.low_reg, lit);
+ OpRegRegImm(op, rl_result.reg.GetReg(), rl_src.reg.GetReg(), lit);
}
StoreValue(rl_dest, rl_result);
}
@@ -1822,15 +1808,15 @@ void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
// Check for destructive overlap
- if (rl_result.low_reg == rl_src2.high_reg) {
+ if (rl_result.reg.GetReg() == rl_src2.reg.GetHighReg()) {
int t_reg = AllocTemp();
- OpRegCopy(t_reg, rl_src2.high_reg);
- OpRegReg(kOpMvn, rl_result.low_reg, rl_src2.low_reg);
- OpRegReg(kOpMvn, rl_result.high_reg, t_reg);
+ OpRegCopy(t_reg, rl_src2.reg.GetHighReg());
+ OpRegReg(kOpMvn, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
+ OpRegReg(kOpMvn, rl_result.reg.GetHighReg(), t_reg);
FreeTemp(t_reg);
} else {
- OpRegReg(kOpMvn, rl_result.low_reg, rl_src2.low_reg);
- OpRegReg(kOpMvn, rl_result.high_reg, rl_src2.high_reg);
+ OpRegReg(kOpMvn, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
+ OpRegReg(kOpMvn, rl_result.reg.GetHighReg(), rl_src2.reg.GetHighReg());
}
StoreValueWide(rl_dest, rl_result);
return;
@@ -2003,7 +1989,7 @@ void Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
/* Generic code for generating a wide constant into a VR. */
void Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) {
RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
- LoadConstantWide(rl_result.low_reg, rl_result.high_reg, value);
+ LoadConstantWide(rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), value);
StoreValueWide(rl_dest, rl_result);
}
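
Throughout this file, GenSput/GenSget/GenIGet/GenIPut switch from per-instruction ComputeStaticFieldInfo/FastInstance driver calls to MirSFieldLoweringInfo / MirIFieldLoweringInfo records resolved once during method analysis. A sketch of the query surface those call sites rely on; the real definition is in compiler/dex/mir_field_info.h, and the fields below are inferred from this diff rather than copied from it:

  #include <cstdint>

  // Sketch of the per-MIR cached static-field info consumed by GenSget/GenSput.
  class MirSFieldLoweringInfo {
   public:
    bool FastGet() const { return (flags_ & kFlagFastGet) != 0; }
    bool FastPut() const { return (flags_ & kFlagFastPut) != 0; }
    bool IsVolatile() const { return (flags_ & kFlagIsVolatile) != 0; }
    bool IsReferrersClass() const { return (flags_ & kFlagIsReferrersClass) != 0; }
    bool IsInitialized() const { return (flags_ & kFlagIsInitialized) != 0; }
    uint16_t FieldIndex() const { return field_idx_; }
    uint32_t StorageIndex() const { return storage_index_; }  // kDexNoIndex if unresolved.
    // The real FieldOffset() returns a MemberOffset wrapper; an int stands in here.
    int32_t FieldOffsetValue() const { return field_offset_; }

   private:
    enum { kFlagFastGet = 1, kFlagFastPut = 2, kFlagIsVolatile = 4,
           kFlagIsReferrersClass = 8, kFlagIsInitialized = 16 };
    uint16_t flags_ = 0;
    uint16_t field_idx_ = 0;
    uint32_t storage_index_ = 0;
    int32_t field_offset_ = 0;
  };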
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 35d193c65c..dd3d466d94 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -284,9 +284,9 @@ void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
*/
RegLocation rl_src = rl_method;
rl_src.location = kLocPhysReg;
- rl_src.low_reg = TargetReg(kArg0);
+ rl_src.reg = RegStorage(RegStorage::k32BitSolo, TargetReg(kArg0));
rl_src.home = false;
- MarkLive(rl_src.low_reg, rl_src.s_reg_low);
+ MarkLive(rl_src.reg.GetReg(), rl_src.s_reg_low);
StoreValue(rl_method, rl_src);
// If Method* has been promoted, explicitly flush
if (rl_method.location == kLocPhysReg) {
@@ -680,7 +680,7 @@ int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
// Wide spans, we need the 2nd half of uses[2].
rl_arg = UpdateLocWide(rl_use2);
if (rl_arg.location == kLocPhysReg) {
- reg = rl_arg.high_reg;
+ reg = rl_arg.reg.GetHighReg();
} else {
// kArg2 & rArg3 can safely be used here
reg = TargetReg(kArg3);
@@ -701,8 +701,10 @@ int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
rl_arg = info->args[next_use];
rl_arg = UpdateRawLoc(rl_arg);
if (rl_arg.location == kLocPhysReg) {
- low_reg = rl_arg.low_reg;
- high_reg = rl_arg.high_reg;
+ low_reg = rl_arg.reg.GetReg();
+ if (rl_arg.wide) {
+ high_reg = rl_arg.reg.GetHighReg();
+ }
} else {
low_reg = TargetReg(kArg2);
if (rl_arg.wide) {
@@ -775,14 +777,14 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
loc = UpdateLocWide(loc);
if ((next_arg >= 2) && (loc.location == kLocPhysReg)) {
StoreBaseDispWide(TargetReg(kSp), SRegOffset(loc.s_reg_low),
- loc.low_reg, loc.high_reg);
+ loc.reg.GetReg(), loc.reg.GetHighReg());
}
next_arg += 2;
} else {
loc = UpdateLoc(loc);
if ((next_arg >= 3) && (loc.location == kLocPhysReg)) {
StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low),
- loc.low_reg, kWord);
+ loc.reg.GetReg(), kWord);
}
next_arg++;
}
@@ -983,7 +985,7 @@ bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
rl_idx = LoadValue(rl_idx, kCoreReg);
}
int reg_max;
- GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, info->opt_flags);
+ GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), info->opt_flags);
bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
LIR* launch_pad = NULL;
int reg_off = INVALID_REG;
@@ -993,15 +995,15 @@ bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
reg_ptr = AllocTemp();
if (range_check) {
reg_max = AllocTemp();
- LoadWordDisp(rl_obj.low_reg, count_offset, reg_max);
+ LoadWordDisp(rl_obj.reg.GetReg(), count_offset, reg_max);
}
- LoadWordDisp(rl_obj.low_reg, offset_offset, reg_off);
- LoadWordDisp(rl_obj.low_reg, value_offset, reg_ptr);
+ LoadWordDisp(rl_obj.reg.GetReg(), offset_offset, reg_off);
+ LoadWordDisp(rl_obj.reg.GetReg(), value_offset, reg_ptr);
if (range_check) {
// Set up a launch pad to allow retry in case of bounds violation.
launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
intrinsic_launchpads_.Insert(launch_pad);
- OpRegReg(kOpCmp, rl_idx.low_reg, reg_max);
+ OpRegReg(kOpCmp, rl_idx.reg.GetReg(), reg_max);
FreeTemp(reg_max);
OpCondBranch(kCondUge, launch_pad);
}
@@ -1013,33 +1015,33 @@ bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
intrinsic_launchpads_.Insert(launch_pad);
if (rl_idx.is_const) {
- OpCmpMemImmBranch(kCondUlt, INVALID_REG, rl_obj.low_reg, count_offset,
+ OpCmpMemImmBranch(kCondUlt, INVALID_REG, rl_obj.reg.GetReg(), count_offset,
mir_graph_->ConstantValue(rl_idx.orig_sreg), launch_pad);
} else {
- OpRegMem(kOpCmp, rl_idx.low_reg, rl_obj.low_reg, count_offset);
+ OpRegMem(kOpCmp, rl_idx.reg.GetReg(), rl_obj.reg.GetReg(), count_offset);
OpCondBranch(kCondUge, launch_pad);
}
}
reg_off = AllocTemp();
reg_ptr = AllocTemp();
- LoadWordDisp(rl_obj.low_reg, offset_offset, reg_off);
- LoadWordDisp(rl_obj.low_reg, value_offset, reg_ptr);
+ LoadWordDisp(rl_obj.reg.GetReg(), offset_offset, reg_off);
+ LoadWordDisp(rl_obj.reg.GetReg(), value_offset, reg_ptr);
}
if (rl_idx.is_const) {
OpRegImm(kOpAdd, reg_off, mir_graph_->ConstantValue(rl_idx.orig_sreg));
} else {
- OpRegReg(kOpAdd, reg_off, rl_idx.low_reg);
+ OpRegReg(kOpAdd, reg_off, rl_idx.reg.GetReg());
}
- FreeTemp(rl_obj.low_reg);
- if (rl_idx.low_reg != INVALID_REG) {
- FreeTemp(rl_idx.low_reg);
+ FreeTemp(rl_obj.reg.GetReg());
+ if (rl_idx.location == kLocPhysReg) {
+ FreeTemp(rl_idx.reg.GetReg());
}
RegLocation rl_dest = InlineTarget(info);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (cu_->instruction_set != kX86) {
- LoadBaseIndexed(reg_ptr, reg_off, rl_result.low_reg, 1, kUnsignedHalf);
+ LoadBaseIndexed(reg_ptr, reg_off, rl_result.reg.GetReg(), 1, kUnsignedHalf);
} else {
- LoadBaseIndexedDisp(reg_ptr, reg_off, 1, data_offset, rl_result.low_reg,
+ LoadBaseIndexedDisp(reg_ptr, reg_off, 1, data_offset, rl_result.reg.GetReg(),
INVALID_REG, kUnsignedHalf, INVALID_SREG);
}
FreeTemp(reg_off);
@@ -1064,18 +1066,18 @@ bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty) {
rl_obj = LoadValue(rl_obj, kCoreReg);
RegLocation rl_dest = InlineTarget(info);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, info->opt_flags);
- LoadWordDisp(rl_obj.low_reg, mirror::String::CountOffset().Int32Value(), rl_result.low_reg);
+ GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), info->opt_flags);
+ LoadWordDisp(rl_obj.reg.GetReg(), mirror::String::CountOffset().Int32Value(), rl_result.reg.GetReg());
if (is_empty) {
// dst = (dst == 0);
if (cu_->instruction_set == kThumb2) {
int t_reg = AllocTemp();
- OpRegReg(kOpNeg, t_reg, rl_result.low_reg);
- OpRegRegReg(kOpAdc, rl_result.low_reg, rl_result.low_reg, t_reg);
+ OpRegReg(kOpNeg, t_reg, rl_result.reg.GetReg());
+ OpRegRegReg(kOpAdc, rl_result.reg.GetReg(), rl_result.reg.GetReg(), t_reg);
} else {
DCHECK_EQ(cu_->instruction_set, kX86);
- OpRegImm(kOpSub, rl_result.low_reg, 1);
- OpRegImm(kOpLsr, rl_result.low_reg, 31);
+ OpRegImm(kOpSub, rl_result.reg.GetReg(), 1);
+ OpRegImm(kOpLsr, rl_result.reg.GetReg(), 31);
}
}
StoreValue(rl_dest, rl_result);
@@ -1092,15 +1094,15 @@ bool Mir2Lir::GenInlinedReverseBytes(CallInfo* info, OpSize size) {
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (size == kLong) {
RegLocation rl_i = LoadValueWide(rl_src_i, kCoreReg);
- int r_i_low = rl_i.low_reg;
- if (rl_i.low_reg == rl_result.low_reg) {
- // First REV shall clobber rl_result.low_reg, save the value in a temp for the second REV.
+ int r_i_low = rl_i.reg.GetReg();
+ if (rl_i.reg.GetReg() == rl_result.reg.GetReg()) {
+ // First REV would clobber rl_result.reg.GetReg(); save the value in a temp for the second REV.
r_i_low = AllocTemp();
- OpRegCopy(r_i_low, rl_i.low_reg);
+ OpRegCopy(r_i_low, rl_i.reg.GetReg());
}
- OpRegReg(kOpRev, rl_result.low_reg, rl_i.high_reg);
- OpRegReg(kOpRev, rl_result.high_reg, r_i_low);
- if (rl_i.low_reg == rl_result.low_reg) {
+ OpRegReg(kOpRev, rl_result.reg.GetReg(), rl_i.reg.GetHighReg());
+ OpRegReg(kOpRev, rl_result.reg.GetHighReg(), r_i_low);
+ if (rl_i.reg.GetReg() == rl_result.reg.GetReg()) {
FreeTemp(r_i_low);
}
StoreValueWide(rl_dest, rl_result);
@@ -1108,7 +1110,7 @@ bool Mir2Lir::GenInlinedReverseBytes(CallInfo* info, OpSize size) {
DCHECK(size == kWord || size == kSignedHalf);
OpKind op = (size == kWord) ? kOpRev : kOpRevsh;
RegLocation rl_i = LoadValue(rl_src_i, kCoreReg);
- OpRegReg(op, rl_result.low_reg, rl_i.low_reg);
+ OpRegReg(op, rl_result.reg.GetReg(), rl_i.reg.GetReg());
StoreValue(rl_dest, rl_result);
}
return true;
@@ -1125,9 +1127,9 @@ bool Mir2Lir::GenInlinedAbsInt(CallInfo* info) {
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
int sign_reg = AllocTemp();
// abs(x) = (x + y) ^ y, where y = x >> 31.
- OpRegRegImm(kOpAsr, sign_reg, rl_src.low_reg, 31);
- OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, sign_reg);
- OpRegReg(kOpXor, rl_result.low_reg, sign_reg);
+ OpRegRegImm(kOpAsr, sign_reg, rl_src.reg.GetReg(), 31);
+ OpRegRegReg(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), sign_reg);
+ OpRegReg(kOpXor, rl_result.reg.GetReg(), sign_reg);
StoreValue(rl_dest, rl_result);
return true;
}
@@ -1144,11 +1146,11 @@ bool Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
int sign_reg = AllocTemp();
// abs(x) = (x + y) ^ y, where y = x >> 31.
- OpRegRegImm(kOpAsr, sign_reg, rl_src.high_reg, 31);
- OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, sign_reg);
- OpRegRegReg(kOpAdc, rl_result.high_reg, rl_src.high_reg, sign_reg);
- OpRegReg(kOpXor, rl_result.low_reg, sign_reg);
- OpRegReg(kOpXor, rl_result.high_reg, sign_reg);
+ OpRegRegImm(kOpAsr, sign_reg, rl_src.reg.GetHighReg(), 31);
+ OpRegRegReg(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), sign_reg);
+ OpRegRegReg(kOpAdc, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), sign_reg);
+ OpRegReg(kOpXor, rl_result.reg.GetReg(), sign_reg);
+ OpRegReg(kOpXor, rl_result.reg.GetHighReg(), sign_reg);
StoreValueWide(rl_dest, rl_result);
return true;
} else {
@@ -1158,16 +1160,16 @@ bool Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
rl_src = LoadValueWide(rl_src, kCoreReg);
RegLocation rl_dest = InlineTargetWide(info);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegCopyWide(rl_result.low_reg, rl_result.high_reg, rl_src.low_reg, rl_src.high_reg);
- FreeTemp(rl_src.low_reg);
- FreeTemp(rl_src.high_reg);
+ OpRegCopyWide(rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
+ FreeTemp(rl_src.reg.GetReg());
+ FreeTemp(rl_src.reg.GetHighReg());
int sign_reg = AllocTemp();
// abs(x) = (x + y) ^ y, where y = x >> 31.
- OpRegRegImm(kOpAsr, sign_reg, rl_result.high_reg, 31);
- OpRegReg(kOpAdd, rl_result.low_reg, sign_reg);
- OpRegReg(kOpAdc, rl_result.high_reg, sign_reg);
- OpRegReg(kOpXor, rl_result.low_reg, sign_reg);
- OpRegReg(kOpXor, rl_result.high_reg, sign_reg);
+ OpRegRegImm(kOpAsr, sign_reg, rl_result.reg.GetHighReg(), 31);
+ OpRegReg(kOpAdd, rl_result.reg.GetReg(), sign_reg);
+ OpRegReg(kOpAdc, rl_result.reg.GetHighReg(), sign_reg);
+ OpRegReg(kOpXor, rl_result.reg.GetReg(), sign_reg);
+ OpRegReg(kOpXor, rl_result.reg.GetHighReg(), sign_reg);
StoreValueWide(rl_dest, rl_result);
return true;
}
@@ -1184,7 +1186,7 @@ bool Mir2Lir::GenInlinedAbsFloat(CallInfo* info) {
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
int signMask = AllocTemp();
LoadConstant(signMask, 0x7fffffff);
- OpRegRegReg(kOpAnd, rl_result.low_reg, rl_src.low_reg, signMask);
+ OpRegRegReg(kOpAnd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), signMask);
FreeTemp(signMask);
StoreValue(rl_dest, rl_result);
return true;
@@ -1199,12 +1201,12 @@ bool Mir2Lir::GenInlinedAbsDouble(CallInfo* info) {
rl_src = LoadValueWide(rl_src, kCoreReg);
RegLocation rl_dest = InlineTargetWide(info);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegCopyWide(rl_result.low_reg, rl_result.high_reg, rl_src.low_reg, rl_src.high_reg);
- FreeTemp(rl_src.low_reg);
- FreeTemp(rl_src.high_reg);
+ OpRegCopyWide(rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
+ FreeTemp(rl_src.reg.GetReg());
+ FreeTemp(rl_src.reg.GetHighReg());
int signMask = AllocTemp();
LoadConstant(signMask, 0x7fffffff);
- OpRegReg(kOpAnd, rl_result.high_reg, signMask);
+ OpRegReg(kOpAnd, rl_result.reg.GetHighReg(), signMask);
FreeTemp(signMask);
StoreValueWide(rl_dest, rl_result);
return true;
@@ -1316,10 +1318,10 @@ bool Mir2Lir::GenInlinedCurrentThread(CallInfo* info) {
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
ThreadOffset offset = Thread::PeerOffset();
if (cu_->instruction_set == kThumb2 || cu_->instruction_set == kMips) {
- LoadWordDisp(TargetReg(kSelf), offset.Int32Value(), rl_result.low_reg);
+ LoadWordDisp(TargetReg(kSelf), offset.Int32Value(), rl_result.reg.GetReg());
} else {
CHECK(cu_->instruction_set == kX86);
- reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.low_reg, offset);
+ reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.reg.GetReg(), offset);
}
StoreValue(rl_dest, rl_result);
return true;
@@ -1343,11 +1345,11 @@ bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info,
RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (is_long) {
- OpRegReg(kOpAdd, rl_object.low_reg, rl_offset.low_reg);
- LoadBaseDispWide(rl_object.low_reg, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
+ OpRegReg(kOpAdd, rl_object.reg.GetReg(), rl_offset.reg.GetReg());
+ LoadBaseDispWide(rl_object.reg.GetReg(), 0, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), INVALID_SREG);
StoreValueWide(rl_dest, rl_result);
} else {
- LoadBaseIndexed(rl_object.low_reg, rl_offset.low_reg, rl_result.low_reg, 0, kWord);
+ LoadBaseIndexed(rl_object.reg.GetReg(), rl_offset.reg.GetReg(), rl_result.reg.GetReg(), 0, kWord);
StoreValue(rl_dest, rl_result);
}
return true;
@@ -1372,20 +1374,20 @@ bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
RegLocation rl_value;
if (is_long) {
rl_value = LoadValueWide(rl_src_value, kCoreReg);
- OpRegReg(kOpAdd, rl_object.low_reg, rl_offset.low_reg);
- StoreBaseDispWide(rl_object.low_reg, 0, rl_value.low_reg, rl_value.high_reg);
+ OpRegReg(kOpAdd, rl_object.reg.GetReg(), rl_offset.reg.GetReg());
+ StoreBaseDispWide(rl_object.reg.GetReg(), 0, rl_value.reg.GetReg(), rl_value.reg.GetHighReg());
} else {
rl_value = LoadValue(rl_src_value, kCoreReg);
- StoreBaseIndexed(rl_object.low_reg, rl_offset.low_reg, rl_value.low_reg, 0, kWord);
+ StoreBaseIndexed(rl_object.reg.GetReg(), rl_offset.reg.GetReg(), rl_value.reg.GetReg(), 0, kWord);
}
// Free up the temp early, to ensure x86 doesn't run out of temporaries in MarkGCCard.
- FreeTemp(rl_offset.low_reg);
+ FreeTemp(rl_offset.reg.GetReg());
if (is_volatile) {
GenMemBarrier(kStoreLoad);
}
if (is_object) {
- MarkGCCard(rl_value.low_reg, rl_object.low_reg);
+ MarkGCCard(rl_value.reg.GetReg(), rl_object.reg.GetReg());
}
return true;
}
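
GenInlinedAbsInt and GenInlinedAbsLong both use the branch-free identity abs(x) = (x + y) ^ y with y = x >> 31, where the arithmetic shift produces 0 or -1; the long variant just extends the add and xor across the register pair with adc. Written out as a hypothetical helper:

  #include <cassert>
  #include <cstdint>

  // abs(x) = (x + y) ^ y, where y = x >> 31 is 0 for x >= 0 and -1 for x < 0.
  // For x < 0: (x - 1) ^ -1 == -x. Branch-free, as in the inlined intrinsic.
  int32_t AbsInt(int32_t x) {
    int32_t y = x >> 31;  // kOpAsr, 31: the sign mask.
    return (x + y) ^ y;   // kOpAdd, then kOpXor.
  }

  int main() {
    assert(AbsInt(-5) == 5);
    assert(AbsInt(7) == 7);
    return 0;
  }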
diff --git a/compiler/dex/quick/gen_loadstore.cc b/compiler/dex/quick/gen_loadstore.cc
index f7c2821afd..3b79df99ce 100644
--- a/compiler/dex/quick/gen_loadstore.cc
+++ b/compiler/dex/quick/gen_loadstore.cc
@@ -92,7 +92,7 @@ LIR* Mir2Lir::StoreWordDisp(int rBase, int displacement, int r_src) {
void Mir2Lir::LoadValueDirect(RegLocation rl_src, int r_dest) {
rl_src = UpdateLoc(rl_src);
if (rl_src.location == kLocPhysReg) {
- OpRegCopy(r_dest, rl_src.low_reg);
+ OpRegCopy(r_dest, rl_src.reg.GetReg());
} else if (IsInexpensiveConstant(rl_src)) {
LoadConstantNoClobber(r_dest, mir_graph_->ConstantValue(rl_src));
} else {
@@ -122,7 +122,7 @@ void Mir2Lir::LoadValueDirectWide(RegLocation rl_src, int reg_lo,
int reg_hi) {
rl_src = UpdateLocWide(rl_src);
if (rl_src.location == kLocPhysReg) {
- OpRegCopyWide(reg_lo, reg_hi, rl_src.low_reg, rl_src.high_reg);
+ OpRegCopyWide(reg_lo, reg_hi, rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
} else if (IsInexpensiveConstant(rl_src)) {
LoadConstantWide(reg_lo, reg_hi, mir_graph_->ConstantValueWide(rl_src));
} else {
@@ -150,9 +150,9 @@ void Mir2Lir::LoadValueDirectWideFixed(RegLocation rl_src, int reg_lo,
RegLocation Mir2Lir::LoadValue(RegLocation rl_src, RegisterClass op_kind) {
rl_src = EvalLoc(rl_src, op_kind, false);
if (IsInexpensiveConstant(rl_src) || rl_src.location != kLocPhysReg) {
- LoadValueDirect(rl_src, rl_src.low_reg);
+ LoadValueDirect(rl_src, rl_src.reg.GetReg());
rl_src.location = kLocPhysReg;
- MarkLive(rl_src.low_reg, rl_src.s_reg_low);
+ MarkLive(rl_src.reg.GetReg(), rl_src.s_reg_low);
}
return rl_src;
}
@@ -175,34 +175,34 @@ void Mir2Lir::StoreValue(RegLocation rl_dest, RegLocation rl_src) {
rl_src = UpdateLoc(rl_src);
rl_dest = UpdateLoc(rl_dest);
if (rl_src.location == kLocPhysReg) {
- if (IsLive(rl_src.low_reg) ||
- IsPromoted(rl_src.low_reg) ||
+ if (IsLive(rl_src.reg.GetReg()) ||
+ IsPromoted(rl_src.reg.GetReg()) ||
(rl_dest.location == kLocPhysReg)) {
// Src is live/promoted or Dest has assigned reg.
rl_dest = EvalLoc(rl_dest, kAnyReg, false);
- OpRegCopy(rl_dest.low_reg, rl_src.low_reg);
+ OpRegCopy(rl_dest.reg.GetReg(), rl_src.reg.GetReg());
} else {
// Just re-assign the registers. Dest gets Src's regs
- rl_dest.low_reg = rl_src.low_reg;
- Clobber(rl_src.low_reg);
+ rl_dest.reg = rl_src.reg;
+ Clobber(rl_src.reg.GetReg());
}
} else {
// Load Src either into promoted Dest or temps allocated for Dest
rl_dest = EvalLoc(rl_dest, kAnyReg, false);
- LoadValueDirect(rl_src, rl_dest.low_reg);
+ LoadValueDirect(rl_src, rl_dest.reg.GetReg());
}
// Dest is now live and dirty (until/if we flush it to home location)
- MarkLive(rl_dest.low_reg, rl_dest.s_reg_low);
+ MarkLive(rl_dest.reg.GetReg(), rl_dest.s_reg_low);
MarkDirty(rl_dest);
ResetDefLoc(rl_dest);
- if (IsDirty(rl_dest.low_reg) &&
+ if (IsDirty(rl_dest.reg.GetReg()) &&
oat_live_out(rl_dest.s_reg_low)) {
def_start = last_lir_insn_;
StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low),
- rl_dest.low_reg, kWord);
+ rl_dest.reg.GetReg(), kWord);
MarkClean(rl_dest);
def_end = last_lir_insn_;
if (!rl_dest.ref) {
@@ -216,10 +216,10 @@ RegLocation Mir2Lir::LoadValueWide(RegLocation rl_src, RegisterClass op_kind) {
DCHECK(rl_src.wide);
rl_src = EvalLoc(rl_src, op_kind, false);
if (IsInexpensiveConstant(rl_src) || rl_src.location != kLocPhysReg) {
- LoadValueDirectWide(rl_src, rl_src.low_reg, rl_src.high_reg);
+ LoadValueDirectWide(rl_src, rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
rl_src.location = kLocPhysReg;
- MarkLive(rl_src.low_reg, rl_src.s_reg_low);
- MarkLive(rl_src.high_reg, GetSRegHi(rl_src.s_reg_low));
+ MarkLive(rl_src.reg.GetReg(), rl_src.s_reg_low);
+ MarkLive(rl_src.reg.GetHighReg(), GetSRegHi(rl_src.s_reg_low));
}
return rl_src;
}
@@ -237,57 +237,59 @@ void Mir2Lir::StoreValueWide(RegLocation rl_dest, RegLocation rl_src) {
}
LIR* def_start;
LIR* def_end;
- DCHECK_EQ(IsFpReg(rl_src.low_reg), IsFpReg(rl_src.high_reg));
+ DCHECK((rl_src.location != kLocPhysReg) ||
+ (IsFpReg(rl_src.reg.GetReg()) == IsFpReg(rl_src.reg.GetHighReg())));
DCHECK(rl_dest.wide);
DCHECK(rl_src.wide);
+ rl_src = UpdateLocWide(rl_src);
+ rl_dest = UpdateLocWide(rl_dest);
if (rl_src.location == kLocPhysReg) {
- if (IsLive(rl_src.low_reg) ||
- IsLive(rl_src.high_reg) ||
- IsPromoted(rl_src.low_reg) ||
- IsPromoted(rl_src.high_reg) ||
+ if (IsLive(rl_src.reg.GetReg()) ||
+ IsLive(rl_src.reg.GetHighReg()) ||
+ IsPromoted(rl_src.reg.GetReg()) ||
+ IsPromoted(rl_src.reg.GetHighReg()) ||
(rl_dest.location == kLocPhysReg)) {
// Src is live or promoted or Dest has assigned reg.
rl_dest = EvalLoc(rl_dest, kAnyReg, false);
- OpRegCopyWide(rl_dest.low_reg, rl_dest.high_reg,
- rl_src.low_reg, rl_src.high_reg);
+ OpRegCopyWide(rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg(),
+ rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
} else {
// Just re-assign the registers. Dest gets Src's regs
- rl_dest.low_reg = rl_src.low_reg;
- rl_dest.high_reg = rl_src.high_reg;
- Clobber(rl_src.low_reg);
- Clobber(rl_src.high_reg);
+ rl_dest.reg = rl_src.reg;
+ Clobber(rl_src.reg.GetReg());
+ Clobber(rl_src.reg.GetHighReg());
}
} else {
// Load Src either into promoted Dest or temps allocated for Dest
rl_dest = EvalLoc(rl_dest, kAnyReg, false);
- LoadValueDirectWide(rl_src, rl_dest.low_reg, rl_dest.high_reg);
+ LoadValueDirectWide(rl_src, rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg());
}
// Dest is now live and dirty (until/if we flush it to home location)
- MarkLive(rl_dest.low_reg, rl_dest.s_reg_low);
+ MarkLive(rl_dest.reg.GetReg(), rl_dest.s_reg_low);
// Does this wide value live in two registers (or one vector register)?
- if (rl_dest.low_reg != rl_dest.high_reg) {
- MarkLive(rl_dest.high_reg, GetSRegHi(rl_dest.s_reg_low));
+ if (rl_dest.reg.GetReg() != rl_dest.reg.GetHighReg()) {
+ MarkLive(rl_dest.reg.GetHighReg(), GetSRegHi(rl_dest.s_reg_low));
MarkDirty(rl_dest);
- MarkPair(rl_dest.low_reg, rl_dest.high_reg);
+ MarkPair(rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg());
} else {
// This must be an x86 vector register value.
- DCHECK(IsFpReg(rl_dest.low_reg) && (cu_->instruction_set == kX86));
+ DCHECK(IsFpReg(rl_dest.reg.GetReg()) && (cu_->instruction_set == kX86));
MarkDirty(rl_dest);
}
ResetDefLocWide(rl_dest);
- if ((IsDirty(rl_dest.low_reg) ||
- IsDirty(rl_dest.high_reg)) &&
+ if ((IsDirty(rl_dest.reg.GetReg()) ||
+ IsDirty(rl_dest.reg.GetHighReg())) &&
(oat_live_out(rl_dest.s_reg_low) ||
oat_live_out(GetSRegHi(rl_dest.s_reg_low)))) {
def_start = last_lir_insn_;
DCHECK_EQ((mir_graph_->SRegToVReg(rl_dest.s_reg_low)+1),
mir_graph_->SRegToVReg(GetSRegHi(rl_dest.s_reg_low)));
StoreBaseDispWide(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low),
- rl_dest.low_reg, rl_dest.high_reg);
+ rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg());
MarkClean(rl_dest);
def_end = last_lir_insn_;
MarkDefWide(rl_dest, def_start, def_end);
@@ -298,25 +300,25 @@ void Mir2Lir::StoreFinalValue(RegLocation rl_dest, RegLocation rl_src) {
DCHECK_EQ(rl_src.location, kLocPhysReg);
if (rl_dest.location == kLocPhysReg) {
- OpRegCopy(rl_dest.low_reg, rl_src.low_reg);
+ OpRegCopy(rl_dest.reg.GetReg(), rl_src.reg.GetReg());
} else {
// Just re-assign the register. Dest gets Src's reg.
- rl_dest.low_reg = rl_src.low_reg;
rl_dest.location = kLocPhysReg;
- Clobber(rl_src.low_reg);
+ rl_dest.reg = rl_src.reg;
+ Clobber(rl_src.reg.GetReg());
}
// Dest is now live and dirty (until/if we flush it to home location)
- MarkLive(rl_dest.low_reg, rl_dest.s_reg_low);
+ MarkLive(rl_dest.reg.GetReg(), rl_dest.s_reg_low);
MarkDirty(rl_dest);
ResetDefLoc(rl_dest);
- if (IsDirty(rl_dest.low_reg) &&
+ if (IsDirty(rl_dest.reg.GetReg()) &&
oat_live_out(rl_dest.s_reg_low)) {
LIR *def_start = last_lir_insn_;
StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low),
- rl_dest.low_reg, kWord);
+ rl_dest.reg.GetReg(), kWord);
MarkClean(rl_dest);
LIR *def_end = last_lir_insn_;
if (!rl_dest.ref) {
@@ -327,46 +329,45 @@ void Mir2Lir::StoreFinalValue(RegLocation rl_dest, RegLocation rl_src) {
}
void Mir2Lir::StoreFinalValueWide(RegLocation rl_dest, RegLocation rl_src) {
- DCHECK_EQ(IsFpReg(rl_src.low_reg), IsFpReg(rl_src.high_reg));
+ DCHECK_EQ(IsFpReg(rl_src.reg.GetReg()), IsFpReg(rl_src.reg.GetHighReg()));
DCHECK(rl_dest.wide);
DCHECK(rl_src.wide);
DCHECK_EQ(rl_src.location, kLocPhysReg);
if (rl_dest.location == kLocPhysReg) {
- OpRegCopyWide(rl_dest.low_reg, rl_dest.high_reg, rl_src.low_reg, rl_src.high_reg);
+ OpRegCopyWide(rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg(), rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
} else {
// Just re-assign the registers. Dest gets Src's regs.
- rl_dest.low_reg = rl_src.low_reg;
- rl_dest.high_reg = rl_src.high_reg;
rl_dest.location = kLocPhysReg;
- Clobber(rl_src.low_reg);
- Clobber(rl_src.high_reg);
+ rl_dest.reg = rl_src.reg;
+ Clobber(rl_src.reg.GetReg());
+ Clobber(rl_src.reg.GetHighReg());
}
// Dest is now live and dirty (until/if we flush it to home location).
- MarkLive(rl_dest.low_reg, rl_dest.s_reg_low);
+ MarkLive(rl_dest.reg.GetReg(), rl_dest.s_reg_low);
// Does this wide value live in two registers (or one vector register)?
- if (rl_dest.low_reg != rl_dest.high_reg) {
- MarkLive(rl_dest.high_reg, GetSRegHi(rl_dest.s_reg_low));
+ if (rl_dest.reg.GetReg() != rl_dest.reg.GetHighReg()) {
+ MarkLive(rl_dest.reg.GetHighReg(), GetSRegHi(rl_dest.s_reg_low));
MarkDirty(rl_dest);
- MarkPair(rl_dest.low_reg, rl_dest.high_reg);
+ MarkPair(rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg());
} else {
// This must be an x86 vector register value.
- DCHECK(IsFpReg(rl_dest.low_reg) && (cu_->instruction_set == kX86));
+ DCHECK(IsFpReg(rl_dest.reg.GetReg()) && (cu_->instruction_set == kX86));
MarkDirty(rl_dest);
}
ResetDefLocWide(rl_dest);
- if ((IsDirty(rl_dest.low_reg) ||
- IsDirty(rl_dest.high_reg)) &&
+ if ((IsDirty(rl_dest.reg.GetReg()) ||
+ IsDirty(rl_dest.reg.GetHighReg())) &&
(oat_live_out(rl_dest.s_reg_low) ||
oat_live_out(GetSRegHi(rl_dest.s_reg_low)))) {
LIR *def_start = last_lir_insn_;
DCHECK_EQ((mir_graph_->SRegToVReg(rl_dest.s_reg_low)+1),
mir_graph_->SRegToVReg(GetSRegHi(rl_dest.s_reg_low)));
StoreBaseDispWide(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low),
- rl_dest.low_reg, rl_dest.high_reg);
+ rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg());
MarkClean(rl_dest);
LIR *def_end = last_lir_insn_;
MarkDefWide(rl_dest, def_start, def_end);
@@ -385,14 +386,13 @@ RegLocation Mir2Lir::LoadCurrMethod() {
RegLocation Mir2Lir::ForceTemp(RegLocation loc) {
DCHECK(!loc.wide);
DCHECK(loc.location == kLocPhysReg);
- DCHECK(!IsFpReg(loc.low_reg));
- DCHECK(!IsFpReg(loc.high_reg));
- if (IsTemp(loc.low_reg)) {
- Clobber(loc.low_reg);
+ DCHECK(!IsFpReg(loc.reg.GetReg()));
+ if (IsTemp(loc.reg.GetReg())) {
+ Clobber(loc.reg.GetReg());
} else {
int temp_low = AllocTemp();
- OpRegCopy(temp_low, loc.low_reg);
- loc.low_reg = temp_low;
+ OpRegCopy(temp_low, loc.reg.GetReg());
+ loc.reg.SetReg(temp_low);
}
// Ensure that this doesn't represent the original SR any more.
@@ -403,21 +403,21 @@ RegLocation Mir2Lir::ForceTemp(RegLocation loc) {
RegLocation Mir2Lir::ForceTempWide(RegLocation loc) {
DCHECK(loc.wide);
DCHECK(loc.location == kLocPhysReg);
- DCHECK(!IsFpReg(loc.low_reg));
- DCHECK(!IsFpReg(loc.high_reg));
- if (IsTemp(loc.low_reg)) {
- Clobber(loc.low_reg);
+ DCHECK(!IsFpReg(loc.reg.GetReg()));
+ DCHECK(!IsFpReg(loc.reg.GetHighReg()));
+ if (IsTemp(loc.reg.GetReg())) {
+ Clobber(loc.reg.GetReg());
} else {
int temp_low = AllocTemp();
- OpRegCopy(temp_low, loc.low_reg);
- loc.low_reg = temp_low;
+ OpRegCopy(temp_low, loc.reg.GetReg());
+ loc.reg.SetReg(temp_low);
}
- if (IsTemp(loc.high_reg)) {
- Clobber(loc.high_reg);
+ if (IsTemp(loc.reg.GetHighReg())) {
+ Clobber(loc.reg.GetHighReg());
} else {
int temp_high = AllocTemp();
- OpRegCopy(temp_high, loc.high_reg);
- loc.high_reg = temp_high;
+ OpRegCopy(temp_high, loc.reg.GetHighReg());
+ loc.reg.SetHighReg(temp_high);
}
// Ensure that this doesn't represent the original SR any more.
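Before moving on to the per-target files, here is a minimal self-contained sketch of the RegStorage surface that all of the rewritten call sites above rely on. It is an illustrative stand-in, not the real class from dex/reg_storage.h (which also packs shape and floating-point bits into a compact encoding); only the constructor kinds and accessors actually exercised by this patch are shown.

// Illustrative stand-in for art::RegStorage, assuming the two shape
// kinds and the accessors used throughout this change.
class RegStorage {
 public:
  enum RegStorageKind { k32BitSolo, k64BitPair };
  RegStorage(RegStorageKind kind, int reg)
      : kind_(kind), low_(reg), high_(-1) {}
  RegStorage(RegStorageKind kind, int low_reg, int high_reg)
      : kind_(kind), low_(low_reg), high_(high_reg) {}
  bool IsPair() const { return kind_ == k64BitPair; }
  int GetReg() const { return low_; }        // solo reg, or low half of a pair
  int GetHighReg() const { return high_; }   // high half of a pair
  void SetReg(int reg) { low_ = reg; }
  void SetHighReg(int reg) { high_ = reg; }
 private:
  RegStorageKind kind_;
  int low_;
  int high_;
};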
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index a663519b82..88f46fd59a 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -114,7 +114,7 @@ void MipsMir2Lir::GenSparseSwitch(MIR* mir, DexOffset table_offset,
LIR* exit_branch = OpCmpBranch(kCondEq, rBase, rEnd, NULL);
LoadWordDisp(rBase, 0, r_key);
OpRegImm(kOpAdd, rBase, 8);
- OpCmpBranch(kCondNe, rl_src.low_reg, r_key, loop_label);
+ OpCmpBranch(kCondNe, rl_src.reg.GetReg(), r_key, loop_label);
int r_disp = AllocTemp();
LoadWordDisp(rBase, -4, r_disp);
OpRegRegReg(kOpAdd, r_RA, r_RA, r_disp);
@@ -162,7 +162,7 @@ void MipsMir2Lir::GenPackedSwitch(MIR* mir, DexOffset table_offset,
bool large_bias = false;
int r_key;
if (low_key == 0) {
- r_key = rl_src.low_reg;
+ r_key = rl_src.reg.GetReg();
} else if ((low_key & 0xffff) != low_key) {
r_key = AllocTemp();
LoadConstant(r_key, low_key);
@@ -179,9 +179,9 @@ void MipsMir2Lir::GenPackedSwitch(MIR* mir, DexOffset table_offset,
NewLIR0(kMipsNop);
} else {
if (large_bias) {
- OpRegRegReg(kOpSub, r_key, rl_src.low_reg, r_key);
+ OpRegRegReg(kOpSub, r_key, rl_src.reg.GetReg(), r_key);
} else {
- OpRegRegImm(kOpSub, r_key, rl_src.low_reg, low_key);
+ OpRegRegImm(kOpSub, r_key, rl_src.reg.GetReg(), low_key);
}
}
GenBarrier(); // Scheduling barrier
@@ -263,7 +263,7 @@ void MipsMir2Lir::GenMoveException(RegLocation rl_dest) {
int ex_offset = Thread::ExceptionOffset().Int32Value();
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
int reset_reg = AllocTemp();
- LoadWordDisp(rMIPS_SELF, ex_offset, rl_result.low_reg);
+ LoadWordDisp(rMIPS_SELF, ex_offset, rl_result.reg.GetReg());
LoadConstant(reset_reg, 0);
StoreWordDisp(rMIPS_SELF, ex_offset, reset_reg);
FreeTemp(reset_reg);
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index dad8a3b492..61eb68dc21 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -49,7 +49,7 @@ class MipsMir2Lir : public Mir2Lir {
bool IsFpReg(int reg);
bool SameRegType(int reg1, int reg2);
int AllocTypedTemp(bool fp_hint, int reg_class);
- int AllocTypedTempPair(bool fp_hint, int reg_class);
+ RegStorage AllocTypedTempWide(bool fp_hint, int reg_class);
int S2d(int low_reg, int high_reg);
int TargetReg(SpecialTargetRegister reg);
int GetArgMappingToPhysicalReg(int arg_num);
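The header change above is the API-level face of the same refactor: AllocTypedTempPair returned two 8-bit register numbers packed into one int, while AllocTypedTempWide hands back a RegStorage. A hedged before/after sketch of a call site, using the packing scheme visible in target_mips.cc later in this diff:

// Before: caller unpacks two register ids from one int.
int packed = AllocTypedTempPair(false, kAnyReg);
int low_reg  = packed & 0xff;
int high_reg = (packed >> 8) & 0xff;

// After: the pair travels as one typed value.
RegStorage regs = AllocTypedTempWide(false, kAnyReg);
int low  = regs.GetReg();
int high = regs.GetHighReg();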
diff --git a/compiler/dex/quick/mips/fp_mips.cc b/compiler/dex/quick/mips/fp_mips.cc
index 9e2fea94de..cf4f19f84c 100644
--- a/compiler/dex/quick/mips/fp_mips.cc
+++ b/compiler/dex/quick/mips/fp_mips.cc
@@ -64,7 +64,7 @@ void MipsMir2Lir::GenArithOpFloat(Instruction::Code opcode,
rl_src1 = LoadValue(rl_src1, kFPReg);
rl_src2 = LoadValue(rl_src2, kFPReg);
rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR3(op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
+ NewLIR3(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
StoreValue(rl_dest, rl_result);
}
@@ -111,8 +111,8 @@ void MipsMir2Lir::GenArithOpDouble(Instruction::Code opcode,
rl_result = EvalLoc(rl_dest, kFPReg, true);
DCHECK(rl_dest.wide);
DCHECK(rl_result.wide);
- NewLIR3(op, S2d(rl_result.low_reg, rl_result.high_reg), S2d(rl_src1.low_reg, rl_src1.high_reg),
- S2d(rl_src2.low_reg, rl_src2.high_reg));
+ NewLIR3(op, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), S2d(rl_src1.reg.GetReg(), rl_src1.reg.GetHighReg()),
+ S2d(rl_src2.reg.GetReg(), rl_src2.reg.GetHighReg()));
StoreValueWide(rl_dest, rl_result);
}
@@ -157,18 +157,18 @@ void MipsMir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest,
}
if (rl_src.wide) {
rl_src = LoadValueWide(rl_src, kFPReg);
- src_reg = S2d(rl_src.low_reg, rl_src.high_reg);
+ src_reg = S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
} else {
rl_src = LoadValue(rl_src, kFPReg);
- src_reg = rl_src.low_reg;
+ src_reg = rl_src.reg.GetReg();
}
if (rl_dest.wide) {
rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR2(op, S2d(rl_result.low_reg, rl_result.high_reg), src_reg);
+ NewLIR2(op, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), src_reg);
StoreValueWide(rl_dest, rl_result);
} else {
rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR2(op, rl_result.low_reg, src_reg);
+ NewLIR2(op, rl_result.reg.GetReg(), src_reg);
StoreValue(rl_dest, rl_result);
}
}
@@ -221,7 +221,7 @@ void MipsMir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src) {
RegLocation rl_result;
rl_src = LoadValue(rl_src, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegImm(kOpAdd, rl_result.low_reg, rl_src.low_reg, 0x80000000);
+ OpRegRegImm(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), 0x80000000);
StoreValue(rl_dest, rl_result);
}
@@ -229,8 +229,8 @@ void MipsMir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) {
RegLocation rl_result;
rl_src = LoadValueWide(rl_src, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegImm(kOpAdd, rl_result.high_reg, rl_src.high_reg, 0x80000000);
- OpRegCopy(rl_result.low_reg, rl_src.low_reg);
+ OpRegRegImm(kOpAdd, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), 0x80000000);
+ OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetReg());
StoreValueWide(rl_dest, rl_result);
}
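The kOpAdd with 0x80000000 in GenNegFloat/GenNegDouble above is a sign-bit flip done in core registers: adding 0x80000000 modulo 2^32 toggles bit 31 and leaves the rest of the word untouched, so no FPU negate instruction is needed. A standalone sketch of the same transformation:

#include <cstdint>
#include <cstring>

// Negate a double by flipping the IEEE-754 sign bit, as the generated
// MIPS code does on the high 32-bit half of the value.
double NegateViaSignBit(double v) {
  uint64_t bits;
  std::memcpy(&bits, &v, sizeof(bits));
  bits ^= 1ULL << 63;  // adding 0x80000000 to the high word has the same effect
  std::memcpy(&v, &bits, sizeof(v));
  return v;
}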
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index 013041a9a5..fec801bb4a 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -47,13 +47,13 @@ void MipsMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
int t0 = AllocTemp();
int t1 = AllocTemp();
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- NewLIR3(kMipsSlt, t0, rl_src1.high_reg, rl_src2.high_reg);
- NewLIR3(kMipsSlt, t1, rl_src2.high_reg, rl_src1.high_reg);
- NewLIR3(kMipsSubu, rl_result.low_reg, t1, t0);
- LIR* branch = OpCmpImmBranch(kCondNe, rl_result.low_reg, 0, NULL);
- NewLIR3(kMipsSltu, t0, rl_src1.low_reg, rl_src2.low_reg);
- NewLIR3(kMipsSltu, t1, rl_src2.low_reg, rl_src1.low_reg);
- NewLIR3(kMipsSubu, rl_result.low_reg, t1, t0);
+ NewLIR3(kMipsSlt, t0, rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
+ NewLIR3(kMipsSlt, t1, rl_src2.reg.GetHighReg(), rl_src1.reg.GetHighReg());
+ NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1, t0);
+ LIR* branch = OpCmpImmBranch(kCondNe, rl_result.reg.GetReg(), 0, NULL);
+ NewLIR3(kMipsSltu, t0, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+ NewLIR3(kMipsSltu, t1, rl_src2.reg.GetReg(), rl_src1.reg.GetReg());
+ NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1, t0);
FreeTemp(t0);
FreeTemp(t1);
LIR* target = NewLIR0(kPseudoTargetLabel);
@@ -228,9 +228,9 @@ RegLocation MipsMir2Lir::GenDivRem(RegLocation rl_dest, int reg1, int reg2,
NewLIR4(kMipsDiv, r_HI, r_LO, reg1, reg2);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (is_div) {
- NewLIR2(kMipsMflo, rl_result.low_reg, r_LO);
+ NewLIR2(kMipsMflo, rl_result.reg.GetReg(), r_LO);
} else {
- NewLIR2(kMipsMfhi, rl_result.low_reg, r_HI);
+ NewLIR2(kMipsMfhi, rl_result.reg.GetReg(), r_HI);
}
return rl_result;
}
@@ -242,9 +242,9 @@ RegLocation MipsMir2Lir::GenDivRemLit(RegLocation rl_dest, int reg1, int lit,
NewLIR4(kMipsDiv, r_HI, r_LO, reg1, t_reg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (is_div) {
- NewLIR2(kMipsMflo, rl_result.low_reg, r_LO);
+ NewLIR2(kMipsMflo, rl_result.reg.GetReg(), r_LO);
} else {
- NewLIR2(kMipsMfhi, rl_result.low_reg, r_HI);
+ NewLIR2(kMipsMfhi, rl_result.reg.GetReg(), r_HI);
}
FreeTemp(t_reg);
return rl_result;
@@ -290,7 +290,7 @@ bool MipsMir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
DCHECK(size == kSignedByte);
- LoadBaseDisp(rl_address.low_reg, 0, rl_result.low_reg, size, INVALID_SREG);
+ LoadBaseDisp(rl_address.reg.GetReg(), 0, rl_result.reg.GetReg(), size, INVALID_SREG);
StoreValue(rl_dest, rl_result);
return true;
}
@@ -306,7 +306,7 @@ bool MipsMir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
DCHECK(size == kSignedByte);
RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
- StoreBaseDisp(rl_address.low_reg, 0, rl_value.low_reg, size);
+ StoreBaseDisp(rl_address.reg.GetReg(), 0, rl_value.reg.GetReg(), size);
return true;
}
@@ -329,11 +329,11 @@ void MipsMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
RegLocation rl_result, int lit,
int first_bit, int second_bit) {
int t_reg = AllocTemp();
- OpRegRegImm(kOpLsl, t_reg, rl_src.low_reg, second_bit - first_bit);
- OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, t_reg);
+ OpRegRegImm(kOpLsl, t_reg, rl_src.reg.GetReg(), second_bit - first_bit);
+ OpRegRegReg(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), t_reg);
FreeTemp(t_reg);
if (first_bit != 0) {
- OpRegRegImm(kOpLsl, rl_result.low_reg, rl_result.low_reg, first_bit);
+ OpRegRegImm(kOpLsl, rl_result.reg.GetReg(), rl_result.reg.GetReg(), first_bit);
}
}
@@ -385,11 +385,11 @@ void MipsMir2Lir::GenAddLong(Instruction::Code opcode, RegLocation rl_dest,
* addu v1,v1,t1
*/
- OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src2.low_reg, rl_src1.low_reg);
+ OpRegRegReg(kOpAdd, rl_result.reg.GetReg(), rl_src2.reg.GetReg(), rl_src1.reg.GetReg());
int t_reg = AllocTemp();
- OpRegRegReg(kOpAdd, t_reg, rl_src2.high_reg, rl_src1.high_reg);
- NewLIR3(kMipsSltu, rl_result.high_reg, rl_result.low_reg, rl_src2.low_reg);
- OpRegRegReg(kOpAdd, rl_result.high_reg, rl_result.high_reg, t_reg);
+ OpRegRegReg(kOpAdd, t_reg, rl_src2.reg.GetHighReg(), rl_src1.reg.GetHighReg());
+ NewLIR3(kMipsSltu, rl_result.reg.GetHighReg(), rl_result.reg.GetReg(), rl_src2.reg.GetReg());
+ OpRegRegReg(kOpAdd, rl_result.reg.GetHighReg(), rl_result.reg.GetHighReg(), t_reg);
FreeTemp(t_reg);
StoreValueWide(rl_dest, rl_result);
}
@@ -408,10 +408,10 @@ void MipsMir2Lir::GenSubLong(Instruction::Code opcode, RegLocation rl_dest,
*/
int t_reg = AllocTemp();
- NewLIR3(kMipsSltu, t_reg, rl_src1.low_reg, rl_src2.low_reg);
- OpRegRegReg(kOpSub, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
- OpRegRegReg(kOpSub, rl_result.high_reg, rl_src1.high_reg, rl_src2.high_reg);
- OpRegRegReg(kOpSub, rl_result.high_reg, rl_result.high_reg, t_reg);
+ NewLIR3(kMipsSltu, t_reg, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+ OpRegRegReg(kOpSub, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+ OpRegRegReg(kOpSub, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
+ OpRegRegReg(kOpSub, rl_result.reg.GetHighReg(), rl_result.reg.GetHighReg(), t_reg);
FreeTemp(t_reg);
StoreValueWide(rl_dest, rl_result);
}
@@ -427,11 +427,11 @@ void MipsMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
* subu v1,v1,t1
*/
- OpRegReg(kOpNeg, rl_result.low_reg, rl_src.low_reg);
- OpRegReg(kOpNeg, rl_result.high_reg, rl_src.high_reg);
+ OpRegReg(kOpNeg, rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ OpRegReg(kOpNeg, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg());
int t_reg = AllocTemp();
- NewLIR3(kMipsSltu, t_reg, r_ZERO, rl_result.low_reg);
- OpRegRegReg(kOpSub, rl_result.high_reg, rl_result.high_reg, t_reg);
+ NewLIR3(kMipsSltu, t_reg, r_ZERO, rl_result.reg.GetReg());
+ OpRegRegReg(kOpSub, rl_result.reg.GetHighReg(), rl_result.reg.GetHighReg(), t_reg);
FreeTemp(t_reg);
StoreValueWide(rl_dest, rl_result);
}
@@ -471,7 +471,7 @@ void MipsMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
}
/* null object? */
- GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);
+ GenNullCheck(rl_array.s_reg_low, rl_array.reg.GetReg(), opt_flags);
int reg_ptr = AllocTemp();
bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
@@ -479,28 +479,28 @@ void MipsMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
if (needs_range_check) {
reg_len = AllocTemp();
/* Get len */
- LoadWordDisp(rl_array.low_reg, len_offset, reg_len);
+ LoadWordDisp(rl_array.reg.GetReg(), len_offset, reg_len);
}
/* reg_ptr -> array data */
- OpRegRegImm(kOpAdd, reg_ptr, rl_array.low_reg, data_offset);
- FreeTemp(rl_array.low_reg);
+ OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg.GetReg(), data_offset);
+ FreeTemp(rl_array.reg.GetReg());
if ((size == kLong) || (size == kDouble)) {
if (scale) {
int r_new_index = AllocTemp();
- OpRegRegImm(kOpLsl, r_new_index, rl_index.low_reg, scale);
+ OpRegRegImm(kOpLsl, r_new_index, rl_index.reg.GetReg(), scale);
OpRegReg(kOpAdd, reg_ptr, r_new_index);
FreeTemp(r_new_index);
} else {
- OpRegReg(kOpAdd, reg_ptr, rl_index.low_reg);
+ OpRegReg(kOpAdd, reg_ptr, rl_index.reg.GetReg());
}
- FreeTemp(rl_index.low_reg);
+ FreeTemp(rl_index.reg.GetReg());
rl_result = EvalLoc(rl_dest, reg_class, true);
if (needs_range_check) {
- GenRegRegCheck(kCondUge, rl_index.low_reg, reg_len, kThrowArrayBounds);
+ GenRegRegCheck(kCondUge, rl_index.reg.GetReg(), reg_len, kThrowArrayBounds);
FreeTemp(reg_len);
}
- LoadBaseDispWide(reg_ptr, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
+ LoadBaseDispWide(reg_ptr, 0, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), INVALID_SREG);
FreeTemp(reg_ptr);
StoreValueWide(rl_dest, rl_result);
@@ -508,10 +508,10 @@ void MipsMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
rl_result = EvalLoc(rl_dest, reg_class, true);
if (needs_range_check) {
- GenRegRegCheck(kCondUge, rl_index.low_reg, reg_len, kThrowArrayBounds);
+ GenRegRegCheck(kCondUge, rl_index.reg.GetReg(), reg_len, kThrowArrayBounds);
FreeTemp(reg_len);
}
- LoadBaseIndexed(reg_ptr, rl_index.low_reg, rl_result.low_reg, scale, size);
+ LoadBaseIndexed(reg_ptr, rl_index.reg.GetReg(), rl_result.reg.GetReg(), scale, size);
FreeTemp(reg_ptr);
StoreValue(rl_dest, rl_result);
@@ -538,17 +538,17 @@ void MipsMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
rl_index = LoadValue(rl_index, kCoreReg);
int reg_ptr = INVALID_REG;
bool allocated_reg_ptr_temp = false;
- if (IsTemp(rl_array.low_reg) && !card_mark) {
- Clobber(rl_array.low_reg);
- reg_ptr = rl_array.low_reg;
+ if (IsTemp(rl_array.reg.GetReg()) && !card_mark) {
+ Clobber(rl_array.reg.GetReg());
+ reg_ptr = rl_array.reg.GetReg();
} else {
reg_ptr = AllocTemp();
- OpRegCopy(reg_ptr, rl_array.low_reg);
+ OpRegCopy(reg_ptr, rl_array.reg.GetReg());
allocated_reg_ptr_temp = true;
}
/* null object? */
- GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);
+ GenNullCheck(rl_array.s_reg_low, rl_array.reg.GetReg(), opt_flags);
bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
int reg_len = INVALID_REG;
@@ -556,7 +556,7 @@ void MipsMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
reg_len = AllocTemp();
// NOTE: max live temps (4) here.
/* Get len */
- LoadWordDisp(rl_array.low_reg, len_offset, reg_len);
+ LoadWordDisp(rl_array.reg.GetReg(), len_offset, reg_len);
}
/* reg_ptr -> array data */
OpRegImm(kOpAdd, reg_ptr, data_offset);
@@ -565,34 +565,34 @@ void MipsMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
// TUNING: specific wide routine that can handle fp regs
if (scale) {
int r_new_index = AllocTemp();
- OpRegRegImm(kOpLsl, r_new_index, rl_index.low_reg, scale);
+ OpRegRegImm(kOpLsl, r_new_index, rl_index.reg.GetReg(), scale);
OpRegReg(kOpAdd, reg_ptr, r_new_index);
FreeTemp(r_new_index);
} else {
- OpRegReg(kOpAdd, reg_ptr, rl_index.low_reg);
+ OpRegReg(kOpAdd, reg_ptr, rl_index.reg.GetReg());
}
rl_src = LoadValueWide(rl_src, reg_class);
if (needs_range_check) {
- GenRegRegCheck(kCondUge, rl_index.low_reg, reg_len, kThrowArrayBounds);
+ GenRegRegCheck(kCondUge, rl_index.reg.GetReg(), reg_len, kThrowArrayBounds);
FreeTemp(reg_len);
}
- StoreBaseDispWide(reg_ptr, 0, rl_src.low_reg, rl_src.high_reg);
+ StoreBaseDispWide(reg_ptr, 0, rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
} else {
rl_src = LoadValue(rl_src, reg_class);
if (needs_range_check) {
- GenRegRegCheck(kCondUge, rl_index.low_reg, reg_len, kThrowArrayBounds);
+ GenRegRegCheck(kCondUge, rl_index.reg.GetReg(), reg_len, kThrowArrayBounds);
FreeTemp(reg_len);
}
- StoreBaseIndexed(reg_ptr, rl_index.low_reg, rl_src.low_reg,
+ StoreBaseIndexed(reg_ptr, rl_index.reg.GetReg(), rl_src.reg.GetReg(),
scale, size);
}
if (allocated_reg_ptr_temp) {
FreeTemp(reg_ptr);
}
if (card_mark) {
- MarkGCCard(rl_src.low_reg, rl_array.low_reg);
+ MarkGCCard(rl_src.reg.GetReg(), rl_array.reg.GetReg());
}
}
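GenCmpLong above builds a three-way 64-bit comparison out of 32-bit slt/sltu: the high words are compared signed, and only on a tie do the low words get compared unsigned. A C++ sketch of the value it computes (a hedged restatement, not the emitted code):

#include <cstdint>

// Returns -1, 0, or 1 for a <, ==, > b, mirroring the slt/sltu pattern.
int CmpLong(int64_t a, int64_t b) {
  int32_t a_hi = static_cast<int32_t>(a >> 32);
  int32_t b_hi = static_cast<int32_t>(b >> 32);
  if (a_hi != b_hi) return (a_hi < b_hi) ? -1 : 1;  // signed high words
  uint32_t a_lo = static_cast<uint32_t>(a);
  uint32_t b_lo = static_cast<uint32_t>(b);
  if (a_lo != b_lo) return (a_lo < b_lo) ? -1 : 1;  // unsigned low words
  return 0;
}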
diff --git a/compiler/dex/quick/mips/mips_lir.h b/compiler/dex/quick/mips/mips_lir.h
index 00eef96cb5..59f442c61a 100644
--- a/compiler/dex/quick/mips/mips_lir.h
+++ b/compiler/dex/quick/mips/mips_lir.h
@@ -141,16 +141,6 @@ namespace art {
#define rMIPS_LR INVALID_REG
#define rMIPS_PC INVALID_REG
-// RegisterLocation templates return values (r_V0, or r_V0/r_V1).
-#define MIPS_LOC_C_RETURN {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, r_V0, INVALID_REG, \
- INVALID_SREG, INVALID_SREG}
-#define MIPS_LOC_C_RETURN_FLOAT {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, r_FRESULT0, \
- INVALID_REG, INVALID_SREG, INVALID_SREG}
-#define MIPS_LOC_C_RETURN_WIDE {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, r_RESULT0, \
- r_RESULT1, INVALID_SREG, INVALID_SREG}
-#define MIPS_LOC_C_RETURN_DOUBLE {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, r_FRESULT0, \
- r_FRESULT1, INVALID_SREG, INVALID_SREG}
-
enum MipsResourceEncodingPos {
kMipsGPReg0 = 0,
kMipsRegSP = 29,
@@ -279,6 +269,20 @@ enum MipsNativeRegisterPool {
#define rMIPS_INVOKE_TGT r_T9
#define rMIPS_COUNT INVALID_REG
+// RegisterLocation templates for return values (r_V0, or r_V0/r_V1).
+const RegLocation mips_loc_c_return
+ {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
+ RegStorage(RegStorage::k32BitSolo, r_V0), INVALID_SREG, INVALID_SREG};
+const RegLocation mips_loc_c_return_wide
+ {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
+ RegStorage(RegStorage::k64BitPair, r_V0, r_V1), INVALID_SREG, INVALID_SREG};
+const RegLocation mips_loc_c_return_float
+ {kLocPhysReg, 0, 0, 0, 1, 0, 0, 0, 1, kVectorNotUsed,
+ RegStorage(RegStorage::k32BitSolo, r_F0), INVALID_SREG, INVALID_SREG};
+const RegLocation mips_loc_c_return_double
+ {kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1, kVectorNotUsed,
+ RegStorage(RegStorage::k64BitPair, r_F0, r_F1), INVALID_SREG, INVALID_SREG};
+
enum MipsShiftEncodings {
kMipsLsl = 0x0,
kMipsLsr = 0x1,
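Two details of the new constants are worth noting. First, a wide return value is now described by a single k64BitPair RegStorage instead of two int fields. Second, the float and double templates set the fifth initializer to 1 where the old macros had 0 (presumably the fp flag of RegLocation). An illustrative fragment reading a template through the new accessors:

// Illustrative only: picking apart the wide return template.
RegLocation ret = mips_loc_c_return_wide;
int lo = ret.reg.GetReg();      // r_V0
int hi = ret.reg.GetHighReg();  // r_V1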
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
index 224e8f21f2..85c250da0f 100644
--- a/compiler/dex/quick/mips/target_mips.cc
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -40,23 +40,19 @@ static int fp_temps[] = {r_F0, r_F1, r_F2, r_F3, r_F4, r_F5, r_F6, r_F7,
r_F8, r_F9, r_F10, r_F11, r_F12, r_F13, r_F14, r_F15};
RegLocation MipsMir2Lir::LocCReturn() {
- RegLocation res = MIPS_LOC_C_RETURN;
- return res;
+ return mips_loc_c_return;
}
RegLocation MipsMir2Lir::LocCReturnWide() {
- RegLocation res = MIPS_LOC_C_RETURN_WIDE;
- return res;
+ return mips_loc_c_return_wide;
}
RegLocation MipsMir2Lir::LocCReturnFloat() {
- RegLocation res = MIPS_LOC_C_RETURN_FLOAT;
- return res;
+ return mips_loc_c_return_float;
}
RegLocation MipsMir2Lir::LocCReturnDouble() {
- RegLocation res = MIPS_LOC_C_RETURN_DOUBLE;
- return res;
+ return mips_loc_c_return_double;
}
// Return a target-dependent special register.
@@ -441,27 +437,20 @@ void MipsMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
#endif
}
-/*
- * Alloc a pair of core registers, or a double. Low reg in low byte,
- * high reg in next byte.
- */
-int MipsMir2Lir::AllocTypedTempPair(bool fp_hint,
- int reg_class) {
+// Alloc a pair of core registers, or a double.
+RegStorage MipsMir2Lir::AllocTypedTempWide(bool fp_hint, int reg_class) {
int high_reg;
int low_reg;
- int res = 0;
if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
low_reg = AllocTempDouble();
high_reg = low_reg + 1;
- res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
- return res;
+ return RegStorage(RegStorage::k64BitPair, low_reg, high_reg);
}
low_reg = AllocTemp();
high_reg = AllocTemp();
- res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
- return res;
+ return RegStorage(RegStorage::k64BitPair, low_reg, high_reg);
}
int MipsMir2Lir::AllocTypedTemp(bool fp_hint, int reg_class) {
@@ -505,11 +494,11 @@ void MipsMir2Lir::CompilerInitializeRegAlloc() {
}
void MipsMir2Lir::FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free) {
- if ((rl_free.low_reg != rl_keep.low_reg) && (rl_free.low_reg != rl_keep.high_reg) &&
- (rl_free.high_reg != rl_keep.low_reg) && (rl_free.high_reg != rl_keep.high_reg)) {
+ if ((rl_free.reg.GetReg() != rl_keep.reg.GetReg()) && (rl_free.reg.GetReg() != rl_keep.reg.GetHighReg()) &&
+ (rl_free.reg.GetHighReg() != rl_keep.reg.GetReg()) && (rl_free.reg.GetHighReg() != rl_keep.reg.GetHighReg())) {
// No overlap, free both
- FreeTemp(rl_free.low_reg);
- FreeTemp(rl_free.high_reg);
+ FreeTemp(rl_free.reg.GetReg());
+ FreeTemp(rl_free.reg.GetHighReg());
}
}
/*
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 28d53ad77b..d9b241e864 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -53,8 +53,9 @@ int Mir2Lir::LoadArg(int in_position, bool wide) {
if (wide && reg_arg_high == INVALID_REG) {
// If the low part is not in a reg, we allocate a pair. Otherwise, we just load to high reg.
if (reg_arg_low == INVALID_REG) {
- int new_regs = AllocTypedTempPair(false, kAnyReg);
- DECODE_REG_PAIR(new_regs, reg_arg_low, reg_arg_high);
+ RegStorage new_regs = AllocTypedTempWide(false, kAnyReg);
+ reg_arg_low = new_regs.GetReg();
+ reg_arg_high = new_regs.GetHighReg();
LoadBaseDispWide(TargetReg(kSp), offset, reg_arg_low, reg_arg_high, INVALID_SREG);
} else {
reg_arg_high = AllocTemp();
@@ -70,6 +71,7 @@ int Mir2Lir::LoadArg(int in_position, bool wide) {
}
if (wide) {
+ // TODO: replace w/ RegStorage.
return ENCODE_REG_PAIR(reg_arg_low, reg_arg_high);
} else {
return reg_arg_low;
@@ -90,25 +92,25 @@ void Mir2Lir::LoadArgDirect(int in_position, RegLocation rl_dest) {
if (!rl_dest.wide) {
int reg = GetArgMappingToPhysicalReg(in_position);
if (reg != INVALID_REG) {
- OpRegCopy(rl_dest.low_reg, reg);
+ OpRegCopy(rl_dest.reg.GetReg(), reg);
} else {
- LoadWordDisp(TargetReg(kSp), offset, rl_dest.low_reg);
+ LoadWordDisp(TargetReg(kSp), offset, rl_dest.reg.GetReg());
}
} else {
int reg_arg_low = GetArgMappingToPhysicalReg(in_position);
int reg_arg_high = GetArgMappingToPhysicalReg(in_position + 1);
if (reg_arg_low != INVALID_REG && reg_arg_high != INVALID_REG) {
- OpRegCopyWide(rl_dest.low_reg, rl_dest.high_reg, reg_arg_low, reg_arg_high);
+ OpRegCopyWide(rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg(), reg_arg_low, reg_arg_high);
} else if (reg_arg_low != INVALID_REG && reg_arg_high == INVALID_REG) {
- OpRegCopy(rl_dest.low_reg, reg_arg_low);
+ OpRegCopy(rl_dest.reg.GetReg(), reg_arg_low);
int offset_high = offset + sizeof(uint32_t);
- LoadWordDisp(TargetReg(kSp), offset_high, rl_dest.high_reg);
+ LoadWordDisp(TargetReg(kSp), offset_high, rl_dest.reg.GetHighReg());
} else if (reg_arg_low == INVALID_REG && reg_arg_high != INVALID_REG) {
- OpRegCopy(rl_dest.high_reg, reg_arg_high);
- LoadWordDisp(TargetReg(kSp), offset, rl_dest.low_reg);
+ OpRegCopy(rl_dest.reg.GetHighReg(), reg_arg_high);
+ LoadWordDisp(TargetReg(kSp), offset, rl_dest.reg.GetReg());
} else {
- LoadBaseDispWide(TargetReg(kSp), offset, rl_dest.low_reg, rl_dest.high_reg, INVALID_SREG);
+ LoadBaseDispWide(TargetReg(kSp), offset, rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg(), INVALID_SREG);
}
}
}
@@ -131,9 +133,9 @@ bool Mir2Lir::GenSpecialIGet(MIR* mir, const InlineMethod& special) {
RegLocation rl_dest = wide ? GetReturnWide(double_or_float) : GetReturn(double_or_float);
int reg_obj = LoadArg(data.object_arg);
if (wide) {
- LoadBaseDispWide(reg_obj, data.field_offset, rl_dest.low_reg, rl_dest.high_reg, INVALID_SREG);
+ LoadBaseDispWide(reg_obj, data.field_offset, rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg(), INVALID_SREG);
} else {
- LoadBaseDisp(reg_obj, data.field_offset, rl_dest.low_reg, kWord, INVALID_SREG);
+ LoadBaseDisp(reg_obj, data.field_offset, rl_dest.reg.GetReg(), kWord, INVALID_SREG);
}
if (data.is_volatile) {
GenMemBarrier(kLoadLoad);
@@ -210,7 +212,7 @@ bool Mir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& speci
successful = true;
RegLocation rl_dest = GetReturn(cu_->shorty[0] == 'F');
GenPrintLabel(mir);
- LoadConstant(rl_dest.low_reg, static_cast<int>(special.d.data));
+ LoadConstant(rl_dest.reg.GetReg(), static_cast<int>(special.d.data));
return_mir = mir_graph_->GetNextUnconditionalMir(bb, mir);
break;
}
@@ -376,19 +378,19 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
case Instruction::CONST_4:
case Instruction::CONST_16:
rl_result = EvalLoc(rl_dest, kAnyReg, true);
- LoadConstantNoClobber(rl_result.low_reg, vB);
+ LoadConstantNoClobber(rl_result.reg.GetReg(), vB);
StoreValue(rl_dest, rl_result);
if (vB == 0) {
- Workaround7250540(rl_dest, rl_result.low_reg);
+ Workaround7250540(rl_dest, rl_result.reg.GetReg());
}
break;
case Instruction::CONST_HIGH16:
rl_result = EvalLoc(rl_dest, kAnyReg, true);
- LoadConstantNoClobber(rl_result.low_reg, vB << 16);
+ LoadConstantNoClobber(rl_result.reg.GetReg(), vB << 16);
StoreValue(rl_dest, rl_result);
if (vB == 0) {
- Workaround7250540(rl_dest, rl_result.low_reg);
+ Workaround7250540(rl_dest, rl_result.reg.GetReg());
}
break;
@@ -403,7 +405,7 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
case Instruction::CONST_WIDE_HIGH16:
rl_result = EvalLoc(rl_dest, kAnyReg, true);
- LoadConstantWide(rl_result.low_reg, rl_result.high_reg,
+ LoadConstantWide(rl_result.reg.GetReg(), rl_result.reg.GetHighReg(),
static_cast<int64_t>(vB) << 48);
StoreValueWide(rl_dest, rl_result);
break;
@@ -436,9 +438,9 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
int len_offset;
len_offset = mirror::Array::LengthOffset().Int32Value();
rl_src[0] = LoadValue(rl_src[0], kCoreReg);
- GenNullCheck(rl_src[0].s_reg_low, rl_src[0].low_reg, opt_flags);
+ GenNullCheck(rl_src[0].s_reg_low, rl_src[0].reg.GetReg(), opt_flags);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- LoadWordDisp(rl_src[0].low_reg, len_offset, rl_result.low_reg);
+ LoadWordDisp(rl_src[0].reg.GetReg(), len_offset, rl_result.reg.GetReg());
StoreValue(rl_dest, rl_result);
break;
@@ -601,72 +603,72 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
break;
case Instruction::IGET_OBJECT:
- GenIGet(vC, opt_flags, kWord, rl_dest, rl_src[0], false, true);
+ GenIGet(mir, opt_flags, kWord, rl_dest, rl_src[0], false, true);
break;
case Instruction::IGET_WIDE:
- GenIGet(vC, opt_flags, kLong, rl_dest, rl_src[0], true, false);
+ GenIGet(mir, opt_flags, kLong, rl_dest, rl_src[0], true, false);
break;
case Instruction::IGET:
- GenIGet(vC, opt_flags, kWord, rl_dest, rl_src[0], false, false);
+ GenIGet(mir, opt_flags, kWord, rl_dest, rl_src[0], false, false);
break;
case Instruction::IGET_CHAR:
- GenIGet(vC, opt_flags, kUnsignedHalf, rl_dest, rl_src[0], false, false);
+ GenIGet(mir, opt_flags, kUnsignedHalf, rl_dest, rl_src[0], false, false);
break;
case Instruction::IGET_SHORT:
- GenIGet(vC, opt_flags, kSignedHalf, rl_dest, rl_src[0], false, false);
+ GenIGet(mir, opt_flags, kSignedHalf, rl_dest, rl_src[0], false, false);
break;
case Instruction::IGET_BOOLEAN:
case Instruction::IGET_BYTE:
- GenIGet(vC, opt_flags, kUnsignedByte, rl_dest, rl_src[0], false, false);
+ GenIGet(mir, opt_flags, kUnsignedByte, rl_dest, rl_src[0], false, false);
break;
case Instruction::IPUT_WIDE:
- GenIPut(vC, opt_flags, kLong, rl_src[0], rl_src[1], true, false);
+ GenIPut(mir, opt_flags, kLong, rl_src[0], rl_src[1], true, false);
break;
case Instruction::IPUT_OBJECT:
- GenIPut(vC, opt_flags, kWord, rl_src[0], rl_src[1], false, true);
+ GenIPut(mir, opt_flags, kWord, rl_src[0], rl_src[1], false, true);
break;
case Instruction::IPUT:
- GenIPut(vC, opt_flags, kWord, rl_src[0], rl_src[1], false, false);
+ GenIPut(mir, opt_flags, kWord, rl_src[0], rl_src[1], false, false);
break;
case Instruction::IPUT_BOOLEAN:
case Instruction::IPUT_BYTE:
- GenIPut(vC, opt_flags, kUnsignedByte, rl_src[0], rl_src[1], false, false);
+ GenIPut(mir, opt_flags, kUnsignedByte, rl_src[0], rl_src[1], false, false);
break;
case Instruction::IPUT_CHAR:
- GenIPut(vC, opt_flags, kUnsignedHalf, rl_src[0], rl_src[1], false, false);
+ GenIPut(mir, opt_flags, kUnsignedHalf, rl_src[0], rl_src[1], false, false);
break;
case Instruction::IPUT_SHORT:
- GenIPut(vC, opt_flags, kSignedHalf, rl_src[0], rl_src[1], false, false);
+ GenIPut(mir, opt_flags, kSignedHalf, rl_src[0], rl_src[1], false, false);
break;
case Instruction::SGET_OBJECT:
- GenSget(vB, rl_dest, false, true);
+ GenSget(mir, rl_dest, false, true);
break;
case Instruction::SGET:
case Instruction::SGET_BOOLEAN:
case Instruction::SGET_BYTE:
case Instruction::SGET_CHAR:
case Instruction::SGET_SHORT:
- GenSget(vB, rl_dest, false, false);
+ GenSget(mir, rl_dest, false, false);
break;
case Instruction::SGET_WIDE:
- GenSget(vB, rl_dest, true, false);
+ GenSget(mir, rl_dest, true, false);
break;
case Instruction::SPUT_OBJECT:
- GenSput(vB, rl_src[0], false, true);
+ GenSput(mir, rl_src[0], false, true);
break;
case Instruction::SPUT:
@@ -674,11 +676,11 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
case Instruction::SPUT_BYTE:
case Instruction::SPUT_CHAR:
case Instruction::SPUT_SHORT:
- GenSput(vB, rl_src[0], false, false);
+ GenSput(mir, rl_src[0], false, false);
break;
case Instruction::SPUT_WIDE:
- GenSput(vB, rl_src[0], true, false);
+ GenSput(mir, rl_src[0], true, false);
break;
case Instruction::INVOKE_STATIC_RANGE:
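The GenIGet/GenIPut/GenSget/GenSput changes above replace the raw field index (vC for instance ops, vB for static ops) with the MIR itself, so the generators can recover the index, and anything else attached to the MIR, on their own. A hedged sketch of what that recovery could look like; MiniMir is a hypothetical stand-in for art::MIR:

#include <cstdint>

struct DecodedInsn { uint32_t vA; uint32_t vB; uint32_t vC; };
struct MiniMir { DecodedInsn dalvik_insn; };

// SGET/SPUT encode the field index in vB, IGET/IPUT in vC, matching the
// arguments the call sites above used to pass explicitly.
uint32_t FieldIndexOf(const MiniMir* mir, bool is_static_op) {
  return is_static_op ? mir->dalvik_insn.vB : mir->dalvik_insn.vC;
}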
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index b74052c117..19621b01ab 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -21,6 +21,7 @@
#include "compiled_method.h"
#include "dex/compiler_enums.h"
#include "dex/compiler_ir.h"
+#include "dex/reg_storage.h"
#include "dex/backend.h"
#include "driver/compiler_driver.h"
#include "leb128.h"
@@ -395,7 +396,6 @@ class Mir2Lir : public Backend {
virtual void Materialize();
virtual CompiledMethod* GetCompiledMethod();
void MarkSafepointPC(LIR* inst);
- bool FastInstance(uint32_t field_idx, bool is_put, int* field_offset, bool* is_volatile);
void SetupResourceMasks(LIR* lir);
void SetMemRefType(LIR* lir, bool is_load, int mem_type);
void AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load, bool is64bit);
@@ -558,13 +558,13 @@ class Mir2Lir : public Backend {
void GenNewArray(uint32_t type_idx, RegLocation rl_dest,
RegLocation rl_src);
void GenFilledNewArray(CallInfo* info);
- void GenSput(uint32_t field_idx, RegLocation rl_src,
+ void GenSput(MIR* mir, RegLocation rl_src,
bool is_long_or_double, bool is_object);
- void GenSget(uint32_t field_idx, RegLocation rl_dest,
+ void GenSget(MIR* mir, RegLocation rl_dest,
bool is_long_or_double, bool is_object);
- void GenIGet(uint32_t field_idx, int opt_flags, OpSize size,
+ void GenIGet(MIR* mir, int opt_flags, OpSize size,
RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double, bool is_object);
- void GenIPut(uint32_t field_idx, int opt_flags, OpSize size,
+ void GenIPut(MIR* mir, int opt_flags, OpSize size,
RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double, bool is_object);
void GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index,
RegLocation rl_src);
@@ -813,7 +813,7 @@ class Mir2Lir : public Backend {
virtual bool IsFpReg(int reg) = 0;
virtual bool SameRegType(int reg1, int reg2) = 0;
virtual int AllocTypedTemp(bool fp_hint, int reg_class) = 0;
- virtual int AllocTypedTempPair(bool fp_hint, int reg_class) = 0;
+ virtual RegStorage AllocTypedTempWide(bool fp_hint, int reg_class) = 0;
virtual int S2d(int low_reg, int high_reg) = 0;
virtual int TargetReg(SpecialTargetRegister reg) = 0;
virtual int GetArgMappingToPhysicalReg(int arg_num) = 0;
@@ -1089,6 +1089,11 @@ class Mir2Lir : public Backend {
bool can_assume_type_is_in_dex_cache,
uint32_t type_idx, RegLocation rl_dest,
RegLocation rl_src);
+ /**
+ * @brief Generate the debug_frame FDE information if possible.
+ * @returns pointer to a vector containing the CFI data, or NULL.
+ */
+ virtual std::vector<uint8_t>* ReturnCallFrameInformation();
/**
* @brief Used to insert marker that can be used to associate MIR with LIR.
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 0a651713ab..3a8942e46e 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -460,7 +460,7 @@ void Mir2Lir::MarkDef(RegLocation rl, LIR *start, LIR *finish) {
DCHECK(!rl.wide);
DCHECK(start && start->next);
DCHECK(finish);
- RegisterInfo* p = GetRegInfo(rl.low_reg);
+ RegisterInfo* p = GetRegInfo(rl.reg.GetReg());
p->def_start = start->next;
p->def_end = finish;
}
@@ -474,8 +474,8 @@ void Mir2Lir::MarkDefWide(RegLocation rl, LIR *start, LIR *finish) {
DCHECK(rl.wide);
DCHECK(start && start->next);
DCHECK(finish);
- RegisterInfo* p = GetRegInfo(rl.low_reg);
- ResetDef(rl.high_reg); // Only track low of pair
+ RegisterInfo* p = GetRegInfo(rl.reg.GetReg());
+ ResetDef(rl.reg.GetHighReg()); // Only track low of pair
p->def_start = start->next;
p->def_end = finish;
}
@@ -483,8 +483,8 @@ void Mir2Lir::MarkDefWide(RegLocation rl, LIR *start, LIR *finish) {
RegLocation Mir2Lir::WideToNarrow(RegLocation rl) {
DCHECK(rl.wide);
if (rl.location == kLocPhysReg) {
- RegisterInfo* info_lo = GetRegInfo(rl.low_reg);
- RegisterInfo* info_hi = GetRegInfo(rl.high_reg);
+ RegisterInfo* info_lo = GetRegInfo(rl.reg.GetReg());
+ RegisterInfo* info_hi = GetRegInfo(rl.reg.GetHighReg());
if (info_lo->is_temp) {
info_lo->pair = false;
info_lo->def_start = NULL;
@@ -502,18 +502,18 @@ RegLocation Mir2Lir::WideToNarrow(RegLocation rl) {
void Mir2Lir::ResetDefLoc(RegLocation rl) {
DCHECK(!rl.wide);
- RegisterInfo* p = IsTemp(rl.low_reg);
+ RegisterInfo* p = IsTemp(rl.reg.GetReg());
if (p && !(cu_->disable_opt & (1 << kSuppressLoads))) {
DCHECK(!p->pair);
NullifyRange(p->def_start, p->def_end, p->s_reg, rl.s_reg_low);
}
- ResetDef(rl.low_reg);
+ ResetDef(rl.reg.GetReg());
}
void Mir2Lir::ResetDefLocWide(RegLocation rl) {
DCHECK(rl.wide);
- RegisterInfo* p_low = IsTemp(rl.low_reg);
- RegisterInfo* p_high = IsTemp(rl.high_reg);
+ RegisterInfo* p_low = IsTemp(rl.reg.GetReg());
+ RegisterInfo* p_high = IsTemp(rl.reg.GetHighReg());
if (p_low && !(cu_->disable_opt & (1 << kSuppressLoads))) {
DCHECK(p_low->pair);
NullifyRange(p_low->def_start, p_low->def_end, p_low->s_reg, rl.s_reg_low);
@@ -521,8 +521,8 @@ void Mir2Lir::ResetDefLocWide(RegLocation rl) {
if (p_high && !(cu_->disable_opt & (1 << kSuppressLoads))) {
DCHECK(p_high->pair);
}
- ResetDef(rl.low_reg);
- ResetDef(rl.high_reg);
+ ResetDef(rl.reg.GetReg());
+ ResetDef(rl.reg.GetHighReg());
}
void Mir2Lir::ResetDefTracking() {
@@ -621,10 +621,10 @@ void Mir2Lir::MarkPair(int low_reg, int high_reg) {
}
void Mir2Lir::MarkClean(RegLocation loc) {
- RegisterInfo* info = GetRegInfo(loc.low_reg);
+ RegisterInfo* info = GetRegInfo(loc.reg.GetReg());
info->dirty = false;
if (loc.wide) {
- info = GetRegInfo(loc.high_reg);
+ info = GetRegInfo(loc.reg.GetHighReg());
info->dirty = false;
}
}
@@ -634,10 +634,10 @@ void Mir2Lir::MarkDirty(RegLocation loc) {
// If already home, can't be dirty
return;
}
- RegisterInfo* info = GetRegInfo(loc.low_reg);
+ RegisterInfo* info = GetRegInfo(loc.reg.GetReg());
info->dirty = true;
if (loc.wide) {
- info = GetRegInfo(loc.high_reg);
+ info = GetRegInfo(loc.reg.GetHighReg());
info->dirty = true;
}
}
@@ -707,7 +707,7 @@ RegLocation Mir2Lir::UpdateLoc(RegLocation loc) {
Clobber(info_lo->partner);
FreeTemp(info_lo->reg);
} else {
- loc.low_reg = info_lo->reg;
+ loc.reg = RegStorage(RegStorage::k32BitSolo, info_lo->reg);
loc.location = kLocPhysReg;
}
}
@@ -744,11 +744,10 @@ RegLocation Mir2Lir::UpdateLocWide(RegLocation loc) {
}
if (match) {
// Can reuse - update the register usage info
- loc.low_reg = info_lo->reg;
- loc.high_reg = info_hi->reg;
loc.location = kLocPhysReg;
- MarkPair(loc.low_reg, loc.high_reg);
- DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
+ loc.reg = RegStorage(RegStorage::k64BitPair, info_lo->reg, info_hi->reg);
+ MarkPair(loc.reg.GetReg(), loc.reg.GetHighReg());
+ DCHECK(!IsFpReg(loc.reg.GetReg()) || ((loc.reg.GetReg() & 0x1) == 0));
return loc;
}
// Can't easily reuse - clobber and free any overlaps
@@ -779,7 +778,6 @@ RegLocation Mir2Lir::UpdateRawLoc(RegLocation loc) {
RegLocation Mir2Lir::EvalLocWide(RegLocation loc, int reg_class, bool update) {
DCHECK(loc.wide);
- int32_t new_regs;
int32_t low_reg;
int32_t high_reg;
@@ -787,22 +785,21 @@ RegLocation Mir2Lir::EvalLocWide(RegLocation loc, int reg_class, bool update) {
/* If already in registers, we can assume proper form. Right reg class? */
if (loc.location == kLocPhysReg) {
- DCHECK_EQ(IsFpReg(loc.low_reg), IsFpReg(loc.high_reg));
- DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
- if (!RegClassMatches(reg_class, loc.low_reg)) {
+ DCHECK_EQ(IsFpReg(loc.reg.GetReg()), IsFpReg(loc.reg.GetHighReg()));
+ DCHECK(!IsFpReg(loc.reg.GetReg()) || ((loc.reg.GetReg() & 0x1) == 0));
+ if (!RegClassMatches(reg_class, loc.reg.GetReg())) {
/* Wrong register class. Reallocate and copy */
- new_regs = AllocTypedTempPair(loc.fp, reg_class);
- low_reg = new_regs & 0xff;
- high_reg = (new_regs >> 8) & 0xff;
- OpRegCopyWide(low_reg, high_reg, loc.low_reg, loc.high_reg);
- CopyRegInfo(low_reg, loc.low_reg);
- CopyRegInfo(high_reg, loc.high_reg);
- Clobber(loc.low_reg);
- Clobber(loc.high_reg);
- loc.low_reg = low_reg;
- loc.high_reg = high_reg;
- MarkPair(loc.low_reg, loc.high_reg);
- DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
+ RegStorage new_regs = AllocTypedTempWide(loc.fp, reg_class);
+ low_reg = new_regs.GetReg();
+ high_reg = new_regs.GetHighReg();
+ OpRegCopyWide(low_reg, high_reg, loc.reg.GetReg(), loc.reg.GetHighReg());
+ CopyRegInfo(low_reg, loc.reg.GetReg());
+ CopyRegInfo(high_reg, loc.reg.GetHighReg());
+ Clobber(loc.reg.GetReg());
+ Clobber(loc.reg.GetHighReg());
+ loc.reg = new_regs;
+ MarkPair(loc.reg.GetReg(), loc.reg.GetHighReg());
+ DCHECK(!IsFpReg(loc.reg.GetReg()) || ((loc.reg.GetReg() & 0x1) == 0));
}
return loc;
}
@@ -810,20 +807,18 @@ RegLocation Mir2Lir::EvalLocWide(RegLocation loc, int reg_class, bool update) {
DCHECK_NE(loc.s_reg_low, INVALID_SREG);
DCHECK_NE(GetSRegHi(loc.s_reg_low), INVALID_SREG);
- new_regs = AllocTypedTempPair(loc.fp, reg_class);
- loc.low_reg = new_regs & 0xff;
- loc.high_reg = (new_regs >> 8) & 0xff;
+ loc.reg = AllocTypedTempWide(loc.fp, reg_class);
- MarkPair(loc.low_reg, loc.high_reg);
+ MarkPair(loc.reg.GetReg(), loc.reg.GetHighReg());
if (update) {
loc.location = kLocPhysReg;
- MarkLive(loc.low_reg, loc.s_reg_low);
+ MarkLive(loc.reg.GetReg(), loc.s_reg_low);
// Does this wide value live in two registers or one vector register?
- if (loc.low_reg != loc.high_reg) {
- MarkLive(loc.high_reg, GetSRegHi(loc.s_reg_low));
+ if (loc.reg.GetReg() != loc.reg.GetHighReg()) {
+ MarkLive(loc.reg.GetHighReg(), GetSRegHi(loc.s_reg_low));
}
}
- DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
+ DCHECK(!IsFpReg(loc.reg.GetReg()) || ((loc.reg.GetReg() & 0x1) == 0));
return loc;
}
@@ -836,13 +831,13 @@ RegLocation Mir2Lir::EvalLoc(RegLocation loc, int reg_class, bool update) {
loc = UpdateLoc(loc);
if (loc.location == kLocPhysReg) {
- if (!RegClassMatches(reg_class, loc.low_reg)) {
+ if (!RegClassMatches(reg_class, loc.reg.GetReg())) {
/* Wrong register class. Realloc, copy and transfer ownership */
new_reg = AllocTypedTemp(loc.fp, reg_class);
- OpRegCopy(new_reg, loc.low_reg);
- CopyRegInfo(new_reg, loc.low_reg);
- Clobber(loc.low_reg);
- loc.low_reg = new_reg;
+ OpRegCopy(new_reg, loc.reg.GetReg());
+ CopyRegInfo(new_reg, loc.reg.GetReg());
+ Clobber(loc.reg.GetReg());
+ loc.reg = RegStorage(RegStorage::k32BitSolo, new_reg);
}
return loc;
}
@@ -850,11 +845,11 @@ RegLocation Mir2Lir::EvalLoc(RegLocation loc, int reg_class, bool update) {
DCHECK_NE(loc.s_reg_low, INVALID_SREG);
new_reg = AllocTypedTemp(loc.fp, reg_class);
- loc.low_reg = new_reg;
+ loc.reg = RegStorage(RegStorage::k32BitSolo, new_reg);
if (update) {
loc.location = kLocPhysReg;
- MarkLive(loc.low_reg, loc.s_reg_low);
+ MarkLive(loc.reg.GetReg(), loc.s_reg_low);
}
return loc;
}
@@ -1006,32 +1001,29 @@ void Mir2Lir::DoPromotion() {
if (curr->fp) {
if (promotion_map_[p_map_idx].fp_location == kLocPhysReg) {
curr->location = kLocPhysReg;
- curr->low_reg = promotion_map_[p_map_idx].FpReg;
+ curr->reg = RegStorage(RegStorage::k32BitSolo, promotion_map_[p_map_idx].FpReg);
curr->home = true;
}
} else {
if (promotion_map_[p_map_idx].core_location == kLocPhysReg) {
curr->location = kLocPhysReg;
- curr->low_reg = promotion_map_[p_map_idx].core_reg;
+ curr->reg = RegStorage(RegStorage::k32BitSolo, promotion_map_[p_map_idx].core_reg);
curr->home = true;
}
}
- curr->high_reg = INVALID_REG;
} else {
if (curr->high_word) {
continue;
}
if (curr->fp) {
if ((promotion_map_[p_map_idx].fp_location == kLocPhysReg) &&
- (promotion_map_[p_map_idx+1].fp_location ==
- kLocPhysReg)) {
+ (promotion_map_[p_map_idx+1].fp_location == kLocPhysReg)) {
int low_reg = promotion_map_[p_map_idx].FpReg;
int high_reg = promotion_map_[p_map_idx+1].FpReg;
// Doubles require pair of singles starting at even reg
if (((low_reg & 0x1) == 0) && ((low_reg + 1) == high_reg)) {
curr->location = kLocPhysReg;
- curr->low_reg = low_reg;
- curr->high_reg = high_reg;
+ curr->reg = RegStorage(RegStorage::k64BitPair, low_reg, high_reg);
curr->home = true;
}
}
@@ -1040,8 +1032,8 @@ void Mir2Lir::DoPromotion() {
&& (promotion_map_[p_map_idx+1].core_location ==
kLocPhysReg)) {
curr->location = kLocPhysReg;
- curr->low_reg = promotion_map_[p_map_idx].core_reg;
- curr->high_reg = promotion_map_[p_map_idx+1].core_reg;
+ curr->reg = RegStorage(RegStorage::k64BitPair, promotion_map_[p_map_idx].core_reg,
+ promotion_map_[p_map_idx+1].core_reg);
curr->home = true;
}
}
@@ -1068,13 +1060,13 @@ RegLocation Mir2Lir::GetReturnWide(bool is_double) {
RegLocation gpr_res = LocCReturnWide();
RegLocation fpr_res = LocCReturnDouble();
RegLocation res = is_double ? fpr_res : gpr_res;
- Clobber(res.low_reg);
- Clobber(res.high_reg);
- LockTemp(res.low_reg);
- LockTemp(res.high_reg);
+ Clobber(res.reg.GetReg());
+ Clobber(res.reg.GetHighReg());
+ LockTemp(res.reg.GetReg());
+ LockTemp(res.reg.GetHighReg());
// Does this wide value live in two registers or one vector register?
- if (res.low_reg != res.high_reg) {
- MarkPair(res.low_reg, res.high_reg);
+ if (res.reg.GetReg() != res.reg.GetHighReg()) {
+ MarkPair(res.reg.GetReg(), res.reg.GetHighReg());
}
return res;
}
@@ -1083,11 +1075,11 @@ RegLocation Mir2Lir::GetReturn(bool is_float) {
RegLocation gpr_res = LocCReturn();
RegLocation fpr_res = LocCReturnFloat();
RegLocation res = is_float ? fpr_res : gpr_res;
- Clobber(res.low_reg);
+ Clobber(res.reg.GetReg());
if (cu_->instruction_set == kMips) {
- MarkInUse(res.low_reg);
+ MarkInUse(res.reg.GetReg());
} else {
- LockTemp(res.low_reg);
+ LockTemp(res.reg.GetReg());
}
return res;
}
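A recurring test in ralloc_util.cc above is GetReg() != GetHighReg(), which separates a true register pair from a wide value held in a single x86 vector register. Two small predicates capture the invariants being DCHECKed, assuming the RegStorage stand-in sketched earlier:

// A wide value is a pair unless both halves name the same register,
// which is only expected for fp values on x86.
bool LivesInPair(const RegStorage& rs) {
  return rs.GetReg() != rs.GetHighReg();
}

// FP pairs must start at an even single and be consecutive, matching
// the (reg & 0x1) == 0 and low + 1 == high checks above.
bool IsValidFpPair(int low_reg, int high_reg) {
  return ((low_reg & 0x1) == 0) && (high_reg == low_reg + 1);
}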
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 0613cdff7a..c92d2bb730 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -40,7 +40,7 @@ void X86Mir2Lir::GenSparseSwitch(MIR* mir, DexOffset table_offset,
int key = keys[i];
BasicBlock* case_block =
mir_graph_->FindBlock(current_dalvik_offset_ + targets[i]);
- OpCmpImmBranch(kCondEq, rl_src.low_reg, key,
+ OpCmpImmBranch(kCondEq, rl_src.reg.GetReg(), key,
&block_label_list_[case_block->id]);
}
}
@@ -87,7 +87,7 @@ void X86Mir2Lir::GenPackedSwitch(MIR* mir, DexOffset table_offset,
// We can use the saved value.
RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
rl_method = LoadValue(rl_method, kCoreReg);
- start_of_method_reg = rl_method.low_reg;
+ start_of_method_reg = rl_method.reg.GetReg();
store_method_addr_used_ = true;
} else {
start_of_method_reg = AllocTemp();
@@ -97,10 +97,10 @@ void X86Mir2Lir::GenPackedSwitch(MIR* mir, DexOffset table_offset,
int keyReg;
// Remove the bias, if necessary
if (low_key == 0) {
- keyReg = rl_src.low_reg;
+ keyReg = rl_src.reg.GetReg();
} else {
keyReg = AllocTemp();
- OpRegRegImm(kOpSub, keyReg, rl_src.low_reg, low_key);
+ OpRegRegImm(kOpSub, keyReg, rl_src.reg.GetReg(), low_key);
}
// Bounds check - if < 0 or >= size continue following switch
OpRegImm(kOpCmp, keyReg, size-1);
@@ -164,7 +164,7 @@ void X86Mir2Lir::GenFillArrayData(DexOffset table_offset, RegLocation rl_src) {
void X86Mir2Lir::GenMoveException(RegLocation rl_dest) {
int ex_offset = Thread::ExceptionOffset().Int32Value();
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- NewLIR2(kX86Mov32RT, rl_result.low_reg, ex_offset);
+ NewLIR2(kX86Mov32RT, rl_result.reg.GetReg(), ex_offset);
NewLIR2(kX86Mov32TI, ex_offset, 0);
StoreValue(rl_dest, rl_result);
}
@@ -198,7 +198,7 @@ void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
LockTemp(rX86_ARG2);
/* Build frame, return address already on stack */
- OpRegImm(kOpSub, rX86_SP, frame_size_ - 4);
+ stack_decrement_ = OpRegImm(kOpSub, rX86_SP, frame_size_ - 4);
/*
* We can safely skip the stack overflow check if we're
@@ -246,7 +246,7 @@ void X86Mir2Lir::GenExitSequence() {
NewLIR0(kPseudoMethodExit);
UnSpillCoreRegs();
/* Remove frame except for return address */
- OpRegImm(kOpAdd, rX86_SP, frame_size_ - 4);
+ stack_increment_ = OpRegImm(kOpAdd, rX86_SP, frame_size_ - 4);
NewLIR0(kX86Ret);
}
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 421d51e4fd..7cc2c08b96 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -48,8 +48,9 @@ class X86Mir2Lir : public Mir2Lir {
// Required for target - register utilities.
bool IsFpReg(int reg);
bool SameRegType(int reg1, int reg2);
+ // TODO: for consistency, make this return a RegStorage as well?
int AllocTypedTemp(bool fp_hint, int reg_class);
- int AllocTypedTempPair(bool fp_hint, int reg_class);
+ RegStorage AllocTypedTempWide(bool fp_hint, int reg_class);
int S2d(int low_reg, int high_reg);
int TargetReg(SpecialTargetRegister reg);
int GetArgMappingToPhysicalReg(int arg_num);
@@ -302,6 +303,18 @@ class X86Mir2Lir : public Mir2Lir {
*/
void InstallLiteralPools();
+ /*
+ * @brief Generate the debug_frame CFI information.
+ * @returns pointer to a vector containing the CFI data.
+ */
+ static std::vector<uint8_t>* ReturnCommonCallFrameInformation();
+
+ /*
+ * @brief Generate the debug_frame FDE information.
+ * @returns pointer to a vector containing the CFI data.
+ */
+ std::vector<uint8_t>* ReturnCallFrameInformation();
+
private:
void EmitPrefix(const X86EncodingMap* entry);
void EmitOpcode(const X86EncodingMap* entry);
@@ -549,6 +562,12 @@ class X86Mir2Lir : public Mir2Lir {
// Instructions needing patching with PC relative code addresses.
GrowableArray<LIR*> call_method_insns_;
+
+ // Prologue decrement of stack pointer.
+ LIR* stack_decrement_;
+
+ // Epilogue increment of stack pointer.
+ LIR* stack_increment_;
};
} // namespace art
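The two new LIR* members record exactly where the prologue drops and the epilogue restores the stack pointer (see the stack_decrement_/stack_increment_ assignments in call_x86.cc above); with those anchors, ReturnCallFrameInformation can describe the frame layout to a debugger. A hedged sketch of the kind of DWARF bytes such a method could append; the opcodes are standard (DW_CFA_advance_loc1 = 0x02, DW_CFA_def_cfa_offset = 0x0e), but the actual ART encoding may differ:

#include <cstdint>
#include <vector>

// Advance the CFI row past `code_delta` bytes of code, then restate the
// CFA offset, e.g. just after the prologue's "sub esp, frame_size - 4".
void AdvanceAndSetCfa(uint8_t code_delta, uint8_t cfa_offset,
                      std::vector<uint8_t>* cfi) {
  cfi->push_back(0x02);        // DW_CFA_advance_loc1
  cfi->push_back(code_delta);  // operand: code bytes since the last row
  cfi->push_back(0x0e);        // DW_CFA_def_cfa_offset
  cfi->push_back(cfa_offset);  // ULEB128; one byte suffices below 128
}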
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index 4c2ecc0efd..1827901b1b 100644
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -63,9 +63,9 @@ void X86Mir2Lir::GenArithOpFloat(Instruction::Code opcode,
rl_src1 = LoadValue(rl_src1, kFPReg);
rl_src2 = LoadValue(rl_src2, kFPReg);
rl_result = EvalLoc(rl_dest, kFPReg, true);
- int r_dest = rl_result.low_reg;
- int r_src1 = rl_src1.low_reg;
- int r_src2 = rl_src2.low_reg;
+ int r_dest = rl_result.reg.GetReg();
+ int r_src1 = rl_src1.reg.GetReg();
+ int r_src2 = rl_src2.reg.GetReg();
if (r_dest == r_src2) {
r_src2 = AllocTempFloat();
OpRegCopy(r_src2, r_dest);
@@ -118,9 +118,9 @@ void X86Mir2Lir::GenArithOpDouble(Instruction::Code opcode,
rl_result = EvalLoc(rl_dest, kFPReg, true);
DCHECK(rl_dest.wide);
DCHECK(rl_result.wide);
- int r_dest = S2d(rl_result.low_reg, rl_result.high_reg);
- int r_src1 = S2d(rl_src1.low_reg, rl_src1.high_reg);
- int r_src2 = S2d(rl_src2.low_reg, rl_src2.high_reg);
+ int r_dest = S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg());
+ int r_src1 = S2d(rl_src1.reg.GetReg(), rl_src1.reg.GetHighReg());
+ int r_src2 = S2d(rl_src2.reg.GetReg(), rl_src2.reg.GetHighReg());
if (r_dest == r_src2) {
r_src2 = AllocTempDouble() | X86_FP_DOUBLE;
OpRegCopy(r_src2, r_dest);
@@ -140,7 +140,7 @@ void X86Mir2Lir::GenLongToFP(RegLocation rl_dest, RegLocation rl_src, bool is_do
// If the source is in physical register, then put it in its location on stack.
if (rl_src.location == kLocPhysReg) {
- RegisterInfo* lo_info = GetRegInfo(rl_src.low_reg);
+ RegisterInfo* lo_info = GetRegInfo(rl_src.reg.GetReg());
if (lo_info != nullptr && lo_info->is_temp) {
// Calling FlushSpecificReg because it will only write back VR if it is dirty.
@@ -148,7 +148,7 @@ void X86Mir2Lir::GenLongToFP(RegLocation rl_dest, RegLocation rl_src, bool is_do
} else {
// It must have been register promoted if it is not a temp but is still in physical
// register. Since we need it to be in memory to convert, we place it there now.
- StoreBaseDispWide(TargetReg(kSp), src_v_reg_offset, rl_src.low_reg, rl_src.high_reg);
+ StoreBaseDispWide(TargetReg(kSp), src_v_reg_offset, rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
}
}
@@ -181,13 +181,13 @@ void X86Mir2Lir::GenLongToFP(RegLocation rl_dest, RegLocation rl_src, bool is_do
if (is_double) {
rl_result = EvalLocWide(rl_dest, kFPReg, true);
- LoadBaseDispWide(TargetReg(kSp), dest_v_reg_offset, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
+ LoadBaseDispWide(TargetReg(kSp), dest_v_reg_offset, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), INVALID_SREG);
StoreValueWide(rl_dest, rl_result);
} else {
rl_result = EvalLoc(rl_dest, kFPReg, true);
- LoadWordDisp(TargetReg(kSp), dest_v_reg_offset, rl_result.low_reg);
+ LoadWordDisp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg.GetReg());
StoreValue(rl_dest, rl_result);
}
@@ -219,21 +219,21 @@ void X86Mir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest,
break;
case Instruction::FLOAT_TO_INT: {
rl_src = LoadValue(rl_src, kFPReg);
- src_reg = rl_src.low_reg;
+ src_reg = rl_src.reg.GetReg();
// In case result vreg is also src vreg, break association to avoid useless copy by EvalLoc()
ClobberSReg(rl_dest.s_reg_low);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
int temp_reg = AllocTempFloat();
- LoadConstant(rl_result.low_reg, 0x7fffffff);
- NewLIR2(kX86Cvtsi2ssRR, temp_reg, rl_result.low_reg);
+ LoadConstant(rl_result.reg.GetReg(), 0x7fffffff);
+ NewLIR2(kX86Cvtsi2ssRR, temp_reg, rl_result.reg.GetReg());
NewLIR2(kX86ComissRR, src_reg, temp_reg);
LIR* branch_pos_overflow = NewLIR2(kX86Jcc8, 0, kX86CondA);
LIR* branch_na_n = NewLIR2(kX86Jcc8, 0, kX86CondP);
- NewLIR2(kX86Cvttss2siRR, rl_result.low_reg, src_reg);
+ NewLIR2(kX86Cvttss2siRR, rl_result.reg.GetReg(), src_reg);
LIR* branch_normal = NewLIR1(kX86Jmp8, 0);
branch_na_n->target = NewLIR0(kPseudoTargetLabel);
- NewLIR2(kX86Xor32RR, rl_result.low_reg, rl_result.low_reg);
+ NewLIR2(kX86Xor32RR, rl_result.reg.GetReg(), rl_result.reg.GetReg());
branch_pos_overflow->target = NewLIR0(kPseudoTargetLabel);
branch_normal->target = NewLIR0(kPseudoTargetLabel);
StoreValue(rl_dest, rl_result);
@@ -241,21 +241,21 @@ void X86Mir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest,
}
case Instruction::DOUBLE_TO_INT: {
rl_src = LoadValueWide(rl_src, kFPReg);
- src_reg = rl_src.low_reg;
+ src_reg = rl_src.reg.GetReg();
// In case result vreg is also src vreg, break association to avoid useless copy by EvalLoc()
ClobberSReg(rl_dest.s_reg_low);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
int temp_reg = AllocTempDouble() | X86_FP_DOUBLE;
- LoadConstant(rl_result.low_reg, 0x7fffffff);
- NewLIR2(kX86Cvtsi2sdRR, temp_reg, rl_result.low_reg);
+ LoadConstant(rl_result.reg.GetReg(), 0x7fffffff);
+ NewLIR2(kX86Cvtsi2sdRR, temp_reg, rl_result.reg.GetReg());
NewLIR2(kX86ComisdRR, src_reg, temp_reg);
LIR* branch_pos_overflow = NewLIR2(kX86Jcc8, 0, kX86CondA);
LIR* branch_na_n = NewLIR2(kX86Jcc8, 0, kX86CondP);
- NewLIR2(kX86Cvttsd2siRR, rl_result.low_reg, src_reg);
+ NewLIR2(kX86Cvttsd2siRR, rl_result.reg.GetReg(), src_reg);
LIR* branch_normal = NewLIR1(kX86Jmp8, 0);
branch_na_n->target = NewLIR0(kPseudoTargetLabel);
- NewLIR2(kX86Xor32RR, rl_result.low_reg, rl_result.low_reg);
+ NewLIR2(kX86Xor32RR, rl_result.reg.GetReg(), rl_result.reg.GetReg());
branch_pos_overflow->target = NewLIR0(kPseudoTargetLabel);
branch_normal->target = NewLIR0(kPseudoTargetLabel);
StoreValue(rl_dest, rl_result);
@@ -278,18 +278,18 @@ void X86Mir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest,
}
if (rl_src.wide) {
rl_src = LoadValueWide(rl_src, rcSrc);
- src_reg = S2d(rl_src.low_reg, rl_src.high_reg);
+ src_reg = S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
} else {
rl_src = LoadValue(rl_src, rcSrc);
- src_reg = rl_src.low_reg;
+ src_reg = rl_src.reg.GetReg();
}
if (rl_dest.wide) {
rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR2(op, S2d(rl_result.low_reg, rl_result.high_reg), src_reg);
+ NewLIR2(op, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), src_reg);
StoreValueWide(rl_dest, rl_result);
} else {
rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR2(op, rl_result.low_reg, src_reg);
+ NewLIR2(op, rl_result.reg.GetReg(), src_reg);
StoreValue(rl_dest, rl_result);
}
}
@@ -302,19 +302,19 @@ void X86Mir2Lir::GenCmpFP(Instruction::Code code, RegLocation rl_dest,
int src_reg2;
if (single) {
rl_src1 = LoadValue(rl_src1, kFPReg);
- src_reg1 = rl_src1.low_reg;
+ src_reg1 = rl_src1.reg.GetReg();
rl_src2 = LoadValue(rl_src2, kFPReg);
- src_reg2 = rl_src2.low_reg;
+ src_reg2 = rl_src2.reg.GetReg();
} else {
rl_src1 = LoadValueWide(rl_src1, kFPReg);
- src_reg1 = S2d(rl_src1.low_reg, rl_src1.high_reg);
+ src_reg1 = S2d(rl_src1.reg.GetReg(), rl_src1.reg.GetHighReg());
rl_src2 = LoadValueWide(rl_src2, kFPReg);
- src_reg2 = S2d(rl_src2.low_reg, rl_src2.high_reg);
+ src_reg2 = S2d(rl_src2.reg.GetReg(), rl_src2.reg.GetHighReg());
}
// In case result vreg is also src vreg, break association to avoid useless copy by EvalLoc()
ClobberSReg(rl_dest.s_reg_low);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- LoadConstantNoClobber(rl_result.low_reg, unordered_gt ? 1 : 0);
+ LoadConstantNoClobber(rl_result.reg.GetReg(), unordered_gt ? 1 : 0);
if (single) {
NewLIR2(kX86UcomissRR, src_reg1, src_reg2);
} else {
@@ -325,20 +325,20 @@ void X86Mir2Lir::GenCmpFP(Instruction::Code code, RegLocation rl_dest,
branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
}
// If the result reg can't be byte accessed, use a jump and move instead of a set.
- if (rl_result.low_reg >= 4) {
+ if (rl_result.reg.GetReg() >= 4) {
LIR* branch2 = NULL;
if (unordered_gt) {
branch2 = NewLIR2(kX86Jcc8, 0, kX86CondA);
- NewLIR2(kX86Mov32RI, rl_result.low_reg, 0x0);
+ NewLIR2(kX86Mov32RI, rl_result.reg.GetReg(), 0x0);
} else {
branch2 = NewLIR2(kX86Jcc8, 0, kX86CondBe);
- NewLIR2(kX86Mov32RI, rl_result.low_reg, 0x1);
+ NewLIR2(kX86Mov32RI, rl_result.reg.GetReg(), 0x1);
}
branch2->target = NewLIR0(kPseudoTargetLabel);
} else {
- NewLIR2(kX86Set8R, rl_result.low_reg, kX86CondA /* above - unsigned > */);
+ NewLIR2(kX86Set8R, rl_result.reg.GetReg(), kX86CondA /* above - unsigned > */);
}
- NewLIR2(kX86Sbb32RI, rl_result.low_reg, 0);
+ NewLIR2(kX86Sbb32RI, rl_result.reg.GetReg(), 0);
if (unordered_gt) {
branch->target = NewLIR0(kPseudoTargetLabel);
}
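A note on the `rl_result.reg.GetReg() >= 4` check above: on IA-32 only register encodings 0-3 (EAX, ECX, EDX, EBX) have byte-addressable low halves, so SETcc cannot target ESI/EDI/EBP/ESP and the code falls back to a conditional jump plus a 32-bit move. A minimal standalone sketch of that decision (not ART code; the encoding assumption is stated in the comments):

    #include <cstdio>
    // IA-32 encodings 0-3 alias the byte registers AL..BL; ids 4-7 have no
    // 8-bit form without REX prefixes, which 32-bit mode lacks.
    static bool ByteAccessible(int reg_id) { return reg_id < 4; }
    int main() {
      for (int r = 0; r < 8; ++r)
        std::printf("reg %d -> %s\n", r, ByteAccessible(r) ? "setcc" : "jcc+mov");
      return 0;
    }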
@@ -357,14 +357,14 @@ void X86Mir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias,
rl_src2 = mir_graph_->GetSrcWide(mir, 2);
rl_src1 = LoadValueWide(rl_src1, kFPReg);
rl_src2 = LoadValueWide(rl_src2, kFPReg);
- NewLIR2(kX86UcomisdRR, S2d(rl_src1.low_reg, rl_src1.high_reg),
- S2d(rl_src2.low_reg, rl_src2.high_reg));
+ NewLIR2(kX86UcomisdRR, S2d(rl_src1.reg.GetReg(), rl_src1.reg.GetHighReg()),
+ S2d(rl_src2.reg.GetReg(), rl_src2.reg.GetHighReg()));
} else {
rl_src1 = mir_graph_->GetSrc(mir, 0);
rl_src2 = mir_graph_->GetSrc(mir, 1);
rl_src1 = LoadValue(rl_src1, kFPReg);
rl_src2 = LoadValue(rl_src2, kFPReg);
- NewLIR2(kX86UcomissRR, rl_src1.low_reg, rl_src2.low_reg);
+ NewLIR2(kX86UcomissRR, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
}
ConditionCode ccode = mir->meta.ccode;
switch (ccode) {
@@ -418,7 +418,7 @@ void X86Mir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src) {
RegLocation rl_result;
rl_src = LoadValue(rl_src, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegImm(kOpAdd, rl_result.low_reg, rl_src.low_reg, 0x80000000);
+ OpRegRegImm(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), 0x80000000);
StoreValue(rl_dest, rl_result);
}
@@ -426,8 +426,8 @@ void X86Mir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) {
RegLocation rl_result;
rl_src = LoadValueWide(rl_src, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegImm(kOpAdd, rl_result.high_reg, rl_src.high_reg, 0x80000000);
- OpRegCopy(rl_result.low_reg, rl_src.low_reg);
+ OpRegRegImm(kOpAdd, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), 0x80000000);
+ OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetReg());
StoreValueWide(rl_dest, rl_result);
}
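GenNegFloat and GenNegDouble above negate by adding 0x80000000 to the (high) word in a core register, which toggles the IEEE-754 sign bit; the carry out of bit 31 is discarded, so the add behaves like an XOR of the top bit. An illustrative standalone version (not ART code):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    int main() {
      double d = 3.5;
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      bits += 0x8000000000000000ULL;  // flips bit 63 only; the carry is discarded
      std::memcpy(&d, &bits, sizeof d);
      std::printf("%f\n", d);  // prints -3.500000
      return 0;
    }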
@@ -436,8 +436,8 @@ bool X86Mir2Lir::GenInlinedSqrt(CallInfo* info) {
RegLocation rl_dest = InlineTargetWide(info); // double place for result
rl_src = LoadValueWide(rl_src, kFPReg);
RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR2(kX86SqrtsdRR, S2d(rl_result.low_reg, rl_result.high_reg),
- S2d(rl_src.low_reg, rl_src.high_reg));
+ NewLIR2(kX86SqrtsdRR, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()),
+ S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg()));
StoreValueWide(rl_dest, rl_result);
return true;
}
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 5f04b7d152..5900990587 100644
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -216,21 +216,21 @@ void X86Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
* mov t1, $false_case
* cmovnz result_reg, t1
*/
- const bool result_reg_same_as_src = (rl_src.location == kLocPhysReg && rl_src.low_reg == rl_result.low_reg);
+ const bool result_reg_same_as_src = (rl_src.location == kLocPhysReg && rl_src.reg.GetReg() == rl_result.reg.GetReg());
const bool true_zero_case = (true_val == 0 && false_val != 0 && !result_reg_same_as_src);
const bool false_zero_case = (false_val == 0 && true_val != 0 && !result_reg_same_as_src);
const bool catch_all_case = !(true_zero_case || false_zero_case);
if (true_zero_case || false_zero_case) {
- OpRegReg(kOpXor, rl_result.low_reg, rl_result.low_reg);
+ OpRegReg(kOpXor, rl_result.reg.GetReg(), rl_result.reg.GetReg());
}
if (true_zero_case || false_zero_case || catch_all_case) {
- OpRegImm(kOpCmp, rl_src.low_reg, 0);
+ OpRegImm(kOpCmp, rl_src.reg.GetReg(), 0);
}
if (catch_all_case) {
- OpRegImm(kOpMov, rl_result.low_reg, true_val);
+ OpRegImm(kOpMov, rl_result.reg.GetReg(), true_val);
}
if (true_zero_case || false_zero_case || catch_all_case) {
@@ -239,7 +239,7 @@ void X86Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
OpRegImm(kOpMov, temp1_reg, immediateForTemp);
ConditionCode cc = false_zero_case ? kCondEq : kCondNe;
- OpCondRegReg(kOpCmov, cc, rl_result.low_reg, temp1_reg);
+ OpCondRegReg(kOpCmov, cc, rl_result.reg.GetReg(), temp1_reg);
FreeTemp(temp1_reg);
}
@@ -264,15 +264,15 @@ void X86Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
*/
// kMirOpSelect is generated just for conditional cases when comparison is done with zero.
- OpRegImm(kOpCmp, rl_src.low_reg, 0);
+ OpRegImm(kOpCmp, rl_src.reg.GetReg(), 0);
- if (rl_result.low_reg == rl_true.low_reg) {
- OpCondRegReg(kOpCmov, kCondNe, rl_result.low_reg, rl_false.low_reg);
- } else if (rl_result.low_reg == rl_false.low_reg) {
- OpCondRegReg(kOpCmov, kCondEq, rl_result.low_reg, rl_true.low_reg);
+ if (rl_result.reg.GetReg() == rl_true.reg.GetReg()) {
+ OpCondRegReg(kOpCmov, kCondNe, rl_result.reg.GetReg(), rl_false.reg.GetReg());
+ } else if (rl_result.reg.GetReg() == rl_false.reg.GetReg()) {
+ OpCondRegReg(kOpCmov, kCondEq, rl_result.reg.GetReg(), rl_true.reg.GetReg());
} else {
- OpRegCopy(rl_result.low_reg, rl_true.low_reg);
- OpCondRegReg(kOpCmov, kCondNe, rl_result.low_reg, rl_false.low_reg);
+ OpRegCopy(rl_result.reg.GetReg(), rl_true.reg.GetReg());
+ OpCondRegReg(kOpCmov, kCondNe, rl_result.reg.GetReg(), rl_false.reg.GetReg());
}
}
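The cases above all implement a branch-free select: preload one value, compare the source against zero, and let CMOV conditionally overwrite. The shape in plain C++, as a sketch rather than ART code:

    #include <cstdio>
    // result = (src == 0) ? true_val : false_val; compiles to cmp + cmov at -O2.
    static int Select(int src, int true_val, int false_val) {
      int result = true_val;             // assume the zero case
      if (src != 0) result = false_val;  // cmovne result_reg, t1
      return result;
    }
    int main() { std::printf("%d %d\n", Select(0, 7, 9), Select(3, 7, 9)); }  // 7 9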
@@ -337,8 +337,8 @@ void X86Mir2Lir::GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1,
LIR* taken = &block_label_list_[bb->taken];
LIR* not_taken = &block_label_list_[bb->fall_through];
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
- int32_t low_reg = rl_src1.low_reg;
- int32_t high_reg = rl_src1.high_reg;
+ int32_t low_reg = rl_src1.reg.GetReg();
+ int32_t high_reg = rl_src1.reg.GetHighReg();
if (val == 0 && (ccode == kCondEq || ccode == kCondNe)) {
int t_reg = AllocTemp();
@@ -461,7 +461,7 @@ RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src,
// Assume that the result will be in EDX.
RegLocation rl_result = {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
- r2, INVALID_REG, INVALID_SREG, INVALID_SREG};
+ RegStorage(RegStorage::k32BitSolo, r2), INVALID_SREG, INVALID_SREG};
// handle div/rem by 1 special case.
if (imm == 1) {
@@ -472,7 +472,7 @@ RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src,
// x % 1 == 0.
LoadConstantNoClobber(r0, 0);
// For this case, return the result in EAX.
- rl_result.low_reg = r0;
+ rl_result.reg.SetReg(r0);
}
} else if (imm == -1) { // handle 0x80000000 / -1 special case.
if (is_div) {
@@ -494,7 +494,7 @@ RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src,
LoadConstantNoClobber(r0, 0);
}
// For this case, return the result in EAX.
- rl_result.low_reg = r0;
+ rl_result.reg.SetReg(r0);
} else {
CHECK(imm <= -2 || imm >= 2);
// Use H.S.Warren's Hacker's Delight Chapter 10 and
@@ -524,8 +524,8 @@ RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src,
// We will need the value later.
if (rl_src.location == kLocPhysReg) {
// We can use it directly.
- DCHECK(rl_src.low_reg != r0 && rl_src.low_reg != r2);
- numerator_reg = rl_src.low_reg;
+ DCHECK(rl_src.reg.GetReg() != r0 && rl_src.reg.GetReg() != r2);
+ numerator_reg = rl_src.reg.GetReg();
} else {
LoadValueDirectFixed(rl_src, r1);
numerator_reg = r1;
@@ -582,7 +582,7 @@ RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src,
NewLIR2(kX86Sub32RR, r0, r2);
// For this case, return the result in EAX.
- rl_result.low_reg = r0;
+ rl_result.reg.SetReg(r0);
}
}
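The `CHECK(imm <= -2 || imm >= 2)` branch follows Hacker's Delight chapter 10: multiply the numerator by a precomputed magic reciprocal, keep the high 32 bits (EDX after IMUL), then apply the add/shift/sign corrections. A worked, self-contained example for divisor 7 (illustrative only; the magic constant and shift are the standard published values, not taken from this patch):

    #include <cassert>
    #include <cstdint>
    static int32_t DivBy7(int32_t n) {
      const int32_t kMagic = (int32_t)0x92492493;  // magic for divisor 7 (negative)
      int32_t hi = (int32_t)(((int64_t)n * kMagic) >> 32);  // EDX after imul
      hi += n;                    // magic < 0, so add the numerator back
      hi >>= 2;                   // shift correction for divisor 7
      hi += (uint32_t)hi >> 31;   // add 1 when the quotient is negative
      return hi;
    }
    int main() {
      assert(DivBy7(100) == 14 && DivBy7(-100) == -14);
      return 0;
    }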
@@ -638,9 +638,9 @@ RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
// Result is in EAX for div and EDX for rem.
RegLocation rl_result = {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
- r0, INVALID_REG, INVALID_SREG, INVALID_SREG};
+ RegStorage(RegStorage::k32BitSolo, r0), INVALID_SREG, INVALID_SREG};
if (!is_div) {
- rl_result.low_reg = r2;
+ rl_result.reg.SetReg(r2);
}
return rl_result;
}
@@ -662,22 +662,22 @@ bool X86Mir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min) {
* The reason is that the first copy will inadvertently clobber the second element with
* the first one, thus yielding the wrong result. We therefore do a swap in that case.
*/
- if (rl_result.low_reg == rl_src2.low_reg) {
+ if (rl_result.reg.GetReg() == rl_src2.reg.GetReg()) {
std::swap(rl_src1, rl_src2);
}
// Pick the first integer as min/max.
- OpRegCopy(rl_result.low_reg, rl_src1.low_reg);
+ OpRegCopy(rl_result.reg.GetReg(), rl_src1.reg.GetReg());
// If the integers are both in the same register, then there is nothing else to do
// because they are equal and we have already moved one into the result.
- if (rl_src1.low_reg != rl_src2.low_reg) {
+ if (rl_src1.reg.GetReg() != rl_src2.reg.GetReg()) {
// It is possible we didn't pick correctly so do the actual comparison now.
- OpRegReg(kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
+ OpRegReg(kOpCmp, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
// Conditionally move the other integer into the destination register.
ConditionCode condition_code = is_min ? kCondGt : kCondLt;
- OpCondRegReg(kOpCmov, condition_code, rl_result.low_reg, rl_src2.low_reg);
+ OpCondRegReg(kOpCmov, condition_code, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
}
StoreValue(rl_dest, rl_result);
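The min/max inline above relies on the same CMOV idiom: copy the first operand into the result, compare, and conditionally replace it when the initial pick was wrong. A sketch (not ART code):

    #include <cassert>
    static int MinViaCmov(int a, int b) {
      int result = a;         // pick the first integer as the min
      if (a > b) result = b;  // cmovg: replace when the pick was wrong
      return result;
    }
    int main() { assert(MinViaCmov(3, 5) == 3 && MinViaCmov(5, 3) == 3); }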
@@ -692,12 +692,12 @@ bool X86Mir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (size == kLong) {
// Unaligned access is allowed on x86.
- LoadBaseDispWide(rl_address.low_reg, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
+ LoadBaseDispWide(rl_address.reg.GetReg(), 0, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), INVALID_SREG);
StoreValueWide(rl_dest, rl_result);
} else {
DCHECK(size == kSignedByte || size == kSignedHalf || size == kWord);
// Unaligned access is allowed on x86.
- LoadBaseDisp(rl_address.low_reg, 0, rl_result.low_reg, size, INVALID_SREG);
+ LoadBaseDisp(rl_address.reg.GetReg(), 0, rl_result.reg.GetReg(), size, INVALID_SREG);
StoreValue(rl_dest, rl_result);
}
return true;
@@ -711,12 +711,12 @@ bool X86Mir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
if (size == kLong) {
// Unaligned access is allowed on x86.
RegLocation rl_value = LoadValueWide(rl_src_value, kCoreReg);
- StoreBaseDispWide(rl_address.low_reg, 0, rl_value.low_reg, rl_value.high_reg);
+ StoreBaseDispWide(rl_address.reg.GetReg(), 0, rl_value.reg.GetReg(), rl_value.reg.GetHighReg());
} else {
DCHECK(size == kSignedByte || size == kSignedHalf || size == kWord);
// Unaligned access is allowed on x86.
RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
- StoreBaseDisp(rl_address.low_reg, 0, rl_value.low_reg, size);
+ StoreBaseDisp(rl_address.reg.GetReg(), 0, rl_value.reg.GetReg(), size);
}
return true;
}
@@ -776,13 +776,13 @@ bool X86Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
if (is_object && !mir_graph_->IsConstantNullRef(rl_new_value)) {
// Mark card for object assuming new value is stored.
FreeTemp(r0); // Temporarily release EAX for MarkGCCard().
- MarkGCCard(rl_new_value.low_reg, rl_object.low_reg);
+ MarkGCCard(rl_new_value.reg.GetReg(), rl_object.reg.GetReg());
LockTemp(r0);
}
RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
LoadValueDirect(rl_src_expected, r0);
- NewLIR5(kX86LockCmpxchgAR, rl_object.low_reg, rl_offset.low_reg, 0, 0, rl_new_value.low_reg);
+ NewLIR5(kX86LockCmpxchgAR, rl_object.reg.GetReg(), rl_offset.reg.GetReg(), 0, 0, rl_new_value.reg.GetReg());
FreeTemp(r0);
}
@@ -790,8 +790,8 @@ bool X86Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
// Convert ZF to boolean
RegLocation rl_dest = InlineTarget(info); // boolean place for result
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- NewLIR2(kX86Set8R, rl_result.low_reg, kX86CondZ);
- NewLIR2(kX86Movzx8RR, rl_result.low_reg, rl_result.low_reg);
+ NewLIR2(kX86Set8R, rl_result.reg.GetReg(), kX86CondZ);
+ NewLIR2(kX86Movzx8RR, rl_result.reg.GetReg(), rl_result.reg.GetReg());
StoreValue(rl_dest, rl_result);
return true;
}
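The inlined CAS above leans on the x86 contract that LOCK CMPXCHG compares EAX with the destination, stores the new value on a match, and reports the outcome in ZF, which the SET8/MOVZX pair then widens to a boolean. The same shape at the C++ level (illustrative; the Unsafe/object plumbing is omitted):

    #include <atomic>
    #include <cstdio>
    int main() {
      std::atomic<int> field{41};
      int expected = 41;  // loaded into EAX before lock cmpxchg
      bool ok = field.compare_exchange_strong(expected, 42);
      std::printf("%d %d\n", (int)ok, field.load());  // prints: 1 42
      return 0;
    }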
@@ -830,11 +830,11 @@ void X86Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
RegLocation rl_result, int lit,
int first_bit, int second_bit) {
int t_reg = AllocTemp();
- OpRegRegImm(kOpLsl, t_reg, rl_src.low_reg, second_bit - first_bit);
- OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, t_reg);
+ OpRegRegImm(kOpLsl, t_reg, rl_src.reg.GetReg(), second_bit - first_bit);
+ OpRegRegReg(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), t_reg);
FreeTemp(t_reg);
if (first_bit != 0) {
- OpRegRegImm(kOpLsl, rl_result.low_reg, rl_result.low_reg, first_bit);
+ OpRegRegImm(kOpLsl, rl_result.reg.GetReg(), rl_result.reg.GetReg(), first_bit);
}
}
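GenMultiplyByTwoBitMultiplier handles literals with exactly two set bits, lit = (1 << first_bit) + (1 << second_bit), as a shift-add-shift. Written out (illustrative):

    #include <cassert>
    // Example: x * 36, with 36 = 0b100100, first_bit = 2, second_bit = 5:
    //   t = x << 3; t += x; result = t << 2.
    static int MulTwoBits(int x, int first_bit, int second_bit) {
      int t = x << (second_bit - first_bit);
      t += x;
      return t << first_bit;
    }
    int main() { assert(MulTwoBits(7, 2, 5) == 7 * 36); }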
@@ -918,12 +918,11 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
int64_t val = mir_graph_->ConstantValueWide(rl_src2);
if (val == 0) {
RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
- OpRegReg(kOpXor, rl_result.low_reg, rl_result.low_reg);
- OpRegReg(kOpXor, rl_result.high_reg, rl_result.high_reg);
+ OpRegReg(kOpXor, rl_result.reg.GetReg(), rl_result.reg.GetReg());
+ OpRegReg(kOpXor, rl_result.reg.GetHighReg(), rl_result.reg.GetHighReg());
StoreValueWide(rl_dest, rl_result);
return;
} else if (val == 1) {
- rl_src1 = EvalLocWide(rl_src1, kCoreReg, true);
StoreValueWide(rl_dest, rl_src1);
return;
} else if (val == 2) {
@@ -952,8 +951,8 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
// ECX <- 1H * 2L
// EAX <- 1L * 2H
if (src1_in_reg) {
- GenImulRegImm(r1, rl_src1.high_reg, val_lo);
- GenImulRegImm(r0, rl_src1.low_reg, val_hi);
+ GenImulRegImm(r1, rl_src1.reg.GetHighReg(), val_lo);
+ GenImulRegImm(r0, rl_src1.reg.GetReg(), val_hi);
} else {
GenImulMemImm(r1, GetSRegHi(rl_src1.s_reg_low), displacement + HIWORD_OFFSET, val_lo);
GenImulMemImm(r0, rl_src1.s_reg_low, displacement + LOWORD_OFFSET, val_hi);
@@ -967,7 +966,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
// EDX:EAX <- 2L * 1L (double precision)
if (src1_in_reg) {
- NewLIR1(kX86Mul32DaR, rl_src1.low_reg);
+ NewLIR1(kX86Mul32DaR, rl_src1.reg.GetReg());
} else {
LIR *m = NewLIR2(kX86Mul32DaM, rX86_SP, displacement + LOWORD_OFFSET);
AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2,
@@ -978,7 +977,8 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
NewLIR2(kX86Add32RR, r2, r1);
// Result is EDX:EAX
- RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, r0, r2,
+ RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
+ RegStorage(RegStorage::k64BitPair, r0, r2),
INVALID_SREG, INVALID_SREG};
StoreValueWide(rl_dest, rl_result);
return;
@@ -1000,7 +1000,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
// ECX <- 1H
if (src1_in_reg) {
- NewLIR2(kX86Mov32RR, r1, rl_src1.high_reg);
+ NewLIR2(kX86Mov32RR, r1, rl_src1.reg.GetHighReg());
} else {
LoadBaseDisp(rX86_SP, SRegOffset(rl_src1.s_reg_low) + HIWORD_OFFSET, r1,
kWord, GetSRegHi(rl_src1.s_reg_low));
@@ -1010,7 +1010,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
// Take advantage of the fact that the values are the same.
// ECX <- ECX * 2L (1H * 2L)
if (src2_in_reg) {
- NewLIR2(kX86Imul32RR, r1, rl_src2.low_reg);
+ NewLIR2(kX86Imul32RR, r1, rl_src2.reg.GetReg());
} else {
int displacement = SRegOffset(rl_src2.s_reg_low);
LIR *m = NewLIR3(kX86Imul32RM, r1, rX86_SP, displacement + LOWORD_OFFSET);
@@ -1023,7 +1023,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
} else {
// EAX <- 2H
if (src2_in_reg) {
- NewLIR2(kX86Mov32RR, r0, rl_src2.high_reg);
+ NewLIR2(kX86Mov32RR, r0, rl_src2.reg.GetHighReg());
} else {
LoadBaseDisp(rX86_SP, SRegOffset(rl_src2.s_reg_low) + HIWORD_OFFSET, r0,
kWord, GetSRegHi(rl_src2.s_reg_low));
@@ -1031,7 +1031,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
// EAX <- EAX * 1L (2H * 1L)
if (src1_in_reg) {
- NewLIR2(kX86Imul32RR, r0, rl_src1.low_reg);
+ NewLIR2(kX86Imul32RR, r0, rl_src1.reg.GetReg());
} else {
int displacement = SRegOffset(rl_src1.s_reg_low);
LIR *m = NewLIR3(kX86Imul32RM, r0, rX86_SP, displacement + LOWORD_OFFSET);
@@ -1041,7 +1041,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
// ECX <- ECX * 2L (1H * 2L)
if (src2_in_reg) {
- NewLIR2(kX86Imul32RR, r1, rl_src2.low_reg);
+ NewLIR2(kX86Imul32RR, r1, rl_src2.reg.GetReg());
} else {
int displacement = SRegOffset(rl_src2.s_reg_low);
LIR *m = NewLIR3(kX86Imul32RM, r1, rX86_SP, displacement + LOWORD_OFFSET);
@@ -1055,7 +1055,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
// EAX <- 2L
if (src2_in_reg) {
- NewLIR2(kX86Mov32RR, r0, rl_src2.low_reg);
+ NewLIR2(kX86Mov32RR, r0, rl_src2.reg.GetReg());
} else {
LoadBaseDisp(rX86_SP, SRegOffset(rl_src2.s_reg_low) + LOWORD_OFFSET, r0,
kWord, rl_src2.s_reg_low);
@@ -1063,7 +1063,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
// EDX:EAX <- 2L * 1L (double precision)
if (src1_in_reg) {
- NewLIR1(kX86Mul32DaR, rl_src1.low_reg);
+ NewLIR1(kX86Mul32DaR, rl_src1.reg.GetReg());
} else {
int displacement = SRegOffset(rl_src1.s_reg_low);
LIR *m = NewLIR2(kX86Mul32DaM, rX86_SP, displacement + LOWORD_OFFSET);
@@ -1075,8 +1075,8 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
NewLIR2(kX86Add32RR, r2, r1);
// Result is EDX:EAX
- RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, r0, r2,
- INVALID_SREG, INVALID_SREG};
+ RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
+ RegStorage(RegStorage::k64BitPair, r0, r2), INVALID_SREG, INVALID_SREG};
StoreValueWide(rl_dest, rl_result);
}
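The register choreography above is the standard 32x32 decomposition of a 64-bit multiply: the low words produce the full 64-bit EDX:EAX product, while the two cross terms only contribute to the high word (the high*high term falls entirely out of range). Checked in portable C++ (illustrative):

    #include <cassert>
    #include <cstdint>
    static uint64_t MulLong(uint32_t a_lo, uint32_t a_hi,
                            uint32_t b_lo, uint32_t b_hi) {
      uint64_t lo_prod = (uint64_t)a_lo * b_lo;    // EDX:EAX <- 1L * 2L
      uint32_t cross = a_hi * b_lo + a_lo * b_hi;  // ECX <- 1H*2L + 2H*1L (mod 2^32)
      return lo_prod + ((uint64_t)cross << 32);    // add32 into EDX
    }
    int main() {
      uint64_t a = 0x123456789abcdef0ULL, b = 0x0fedcba987654321ULL;
      assert(MulLong((uint32_t)a, (uint32_t)(a >> 32),
                     (uint32_t)b, (uint32_t)(b >> 32)) == a * b);
      return 0;
    }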
@@ -1086,18 +1086,18 @@ void X86Mir2Lir::GenLongRegOrMemOp(RegLocation rl_dest, RegLocation rl_src,
X86OpCode x86op = GetOpcode(op, rl_dest, rl_src, false);
if (rl_src.location == kLocPhysReg) {
// Both operands are in registers.
- if (rl_dest.low_reg == rl_src.high_reg) {
+ if (rl_dest.reg.GetReg() == rl_src.reg.GetHighReg()) {
// The registers are the same, so we would clobber it before the use.
int temp_reg = AllocTemp();
- OpRegCopy(temp_reg, rl_dest.low_reg);
- rl_src.high_reg = temp_reg;
+ OpRegCopy(temp_reg, rl_dest.reg.GetReg());
+ rl_src.reg.SetHighReg(temp_reg);
}
- NewLIR2(x86op, rl_dest.low_reg, rl_src.low_reg);
+ NewLIR2(x86op, rl_dest.reg.GetReg(), rl_src.reg.GetReg());
x86op = GetOpcode(op, rl_dest, rl_src, true);
- NewLIR2(x86op, rl_dest.high_reg, rl_src.high_reg);
- FreeTemp(rl_src.low_reg);
- FreeTemp(rl_src.high_reg);
+ NewLIR2(x86op, rl_dest.reg.GetHighReg(), rl_src.reg.GetHighReg());
+ FreeTemp(rl_src.reg.GetReg());
+ FreeTemp(rl_src.reg.GetHighReg());
return;
}
@@ -1107,11 +1107,11 @@ void X86Mir2Lir::GenLongRegOrMemOp(RegLocation rl_dest, RegLocation rl_src,
int rBase = TargetReg(kSp);
int displacement = SRegOffset(rl_src.s_reg_low);
- LIR *lir = NewLIR3(x86op, rl_dest.low_reg, rBase, displacement + LOWORD_OFFSET);
+ LIR *lir = NewLIR3(x86op, rl_dest.reg.GetReg(), rBase, displacement + LOWORD_OFFSET);
AnnotateDalvikRegAccess(lir, (displacement + LOWORD_OFFSET) >> 2,
true /* is_load */, true /* is64bit */);
x86op = GetOpcode(op, rl_dest, rl_src, true);
- lir = NewLIR3(x86op, rl_dest.high_reg, rBase, displacement + HIWORD_OFFSET);
+ lir = NewLIR3(x86op, rl_dest.reg.GetHighReg(), rBase, displacement + HIWORD_OFFSET);
AnnotateDalvikRegAccess(lir, (displacement + HIWORD_OFFSET) >> 2,
true /* is_load */, true /* is64bit */);
}
@@ -1138,15 +1138,15 @@ void X86Mir2Lir::GenLongArith(RegLocation rl_dest, RegLocation rl_src, Instructi
int rBase = TargetReg(kSp);
int displacement = SRegOffset(rl_dest.s_reg_low);
- LIR *lir = NewLIR3(x86op, rBase, displacement + LOWORD_OFFSET, rl_src.low_reg);
+ LIR *lir = NewLIR3(x86op, rBase, displacement + LOWORD_OFFSET, rl_src.reg.GetReg());
AnnotateDalvikRegAccess(lir, (displacement + LOWORD_OFFSET) >> 2,
false /* is_load */, true /* is64bit */);
x86op = GetOpcode(op, rl_dest, rl_src, true);
- lir = NewLIR3(x86op, rBase, displacement + HIWORD_OFFSET, rl_src.high_reg);
+ lir = NewLIR3(x86op, rBase, displacement + HIWORD_OFFSET, rl_src.reg.GetHighReg());
AnnotateDalvikRegAccess(lir, (displacement + HIWORD_OFFSET) >> 2,
false /* is_load */, true /* is64bit */);
- FreeTemp(rl_src.low_reg);
- FreeTemp(rl_src.high_reg);
+ FreeTemp(rl_src.reg.GetReg());
+ FreeTemp(rl_src.reg.GetHighReg());
}
void X86Mir2Lir::GenLongArith(RegLocation rl_dest, RegLocation rl_src1,
@@ -1188,12 +1188,12 @@ void X86Mir2Lir::GenLongArith(RegLocation rl_dest, RegLocation rl_src1,
// Get one of the source operands into temporary register.
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
- if (IsTemp(rl_src1.low_reg) && IsTemp(rl_src1.high_reg)) {
+ if (IsTemp(rl_src1.reg.GetReg()) && IsTemp(rl_src1.reg.GetHighReg())) {
GenLongRegOrMemOp(rl_src1, rl_src2, op);
} else if (is_commutative) {
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
// We need at least one of them to be a temporary.
- if (!(IsTemp(rl_src2.low_reg) && IsTemp(rl_src2.high_reg))) {
+ if (!(IsTemp(rl_src2.reg.GetReg()) && IsTemp(rl_src2.reg.GetHighReg()))) {
rl_src1 = ForceTempWide(rl_src1);
}
GenLongRegOrMemOp(rl_src1, rl_src2, op);
@@ -1234,15 +1234,16 @@ void X86Mir2Lir::GenXorLong(Instruction::Code opcode, RegLocation rl_dest,
void X86Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
rl_src = LoadValueWide(rl_src, kCoreReg);
RegLocation rl_result = ForceTempWide(rl_src);
- if (rl_dest.low_reg == rl_src.high_reg) {
+ if (((rl_dest.location == kLocPhysReg) && (rl_src.location == kLocPhysReg)) &&
+ ((rl_dest.reg.GetReg() == rl_src.reg.GetHighReg()))) {
// The registers are the same, so we would clobber it before the use.
int temp_reg = AllocTemp();
- OpRegCopy(temp_reg, rl_result.low_reg);
- rl_result.high_reg = temp_reg;
+ OpRegCopy(temp_reg, rl_result.reg.GetReg());
+ rl_result.reg.SetHighReg(temp_reg);
}
- OpRegReg(kOpNeg, rl_result.low_reg, rl_result.low_reg); // rLow = -rLow
- OpRegImm(kOpAdc, rl_result.high_reg, 0); // rHigh = rHigh + CF
- OpRegReg(kOpNeg, rl_result.high_reg, rl_result.high_reg); // rHigh = -rHigh
+ OpRegReg(kOpNeg, rl_result.reg.GetReg(), rl_result.reg.GetReg()); // rLow = -rLow
+ OpRegImm(kOpAdc, rl_result.reg.GetHighReg(), 0); // rHigh = rHigh + CF
+ OpRegReg(kOpNeg, rl_result.reg.GetHighReg(), rl_result.reg.GetHighReg()); // rHigh = -rHigh
StoreValueWide(rl_dest, rl_result);
}
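The neg/adc/neg triple above is the two-register 64-bit negate: NEG on the low word sets CF exactly when that word was nonzero, ADC folds the borrow into the high word, and the final NEG completes -(high + carry). The same arithmetic in portable form (illustrative):

    #include <cassert>
    #include <cstdint>
    static void NegLong(uint32_t& lo, uint32_t& hi) {
      uint32_t carry = (lo != 0);  // CF after "neg lo"
      lo = 0u - lo;
      hi = 0u - (hi + carry);      // adc hi, 0 then neg hi
    }
    int main() {
      uint32_t lo = 5, hi = 0;
      NegLong(lo, hi);
      assert((((uint64_t)hi << 32) | lo) == (uint64_t)-5LL);
      return 0;
    }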
@@ -1284,29 +1285,29 @@ void X86Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
// If index is constant, just fold it into the data offset
data_offset += constant_index_value << scale;
// Treat as non-array below.
- rl_index.low_reg = INVALID_REG;
+ rl_index.reg = RegStorage(RegStorage::k32BitSolo, INVALID_REG);
}
/* null object? */
- GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);
+ GenNullCheck(rl_array.s_reg_low, rl_array.reg.GetReg(), opt_flags);
if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
if (constant_index) {
- GenMemImmedCheck(kCondLs, rl_array.low_reg, len_offset,
+ GenMemImmedCheck(kCondLs, rl_array.reg.GetReg(), len_offset,
constant_index_value, kThrowConstantArrayBounds);
} else {
- GenRegMemCheck(kCondUge, rl_index.low_reg, rl_array.low_reg,
+ GenRegMemCheck(kCondUge, rl_index.reg.GetReg(), rl_array.reg.GetReg(),
len_offset, kThrowArrayBounds);
}
}
rl_result = EvalLoc(rl_dest, reg_class, true);
if ((size == kLong) || (size == kDouble)) {
- LoadBaseIndexedDisp(rl_array.low_reg, rl_index.low_reg, scale, data_offset, rl_result.low_reg,
- rl_result.high_reg, size, INVALID_SREG);
+ LoadBaseIndexedDisp(rl_array.reg.GetReg(), rl_index.reg.GetReg(), scale, data_offset, rl_result.reg.GetReg(),
+ rl_result.reg.GetHighReg(), size, INVALID_SREG);
StoreValueWide(rl_dest, rl_result);
} else {
- LoadBaseIndexedDisp(rl_array.low_reg, rl_index.low_reg, scale,
- data_offset, rl_result.low_reg, INVALID_REG, size,
+ LoadBaseIndexedDisp(rl_array.reg.GetReg(), rl_index.reg.GetReg(), scale,
+ data_offset, rl_result.reg.GetReg(), INVALID_REG, size,
INVALID_SREG);
StoreValue(rl_dest, rl_result);
}
@@ -1338,18 +1339,18 @@ void X86Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
constant_index_value = mir_graph_->ConstantValue(rl_index);
data_offset += constant_index_value << scale;
// Treat as non-array below.
- rl_index.low_reg = INVALID_REG;
+ rl_index.reg = RegStorage(RegStorage::k32BitSolo, INVALID_REG);
}
/* null object? */
- GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);
+ GenNullCheck(rl_array.s_reg_low, rl_array.reg.GetReg(), opt_flags);
if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
if (constant_index) {
- GenMemImmedCheck(kCondLs, rl_array.low_reg, len_offset,
+ GenMemImmedCheck(kCondLs, rl_array.reg.GetReg(), len_offset,
constant_index_value, kThrowConstantArrayBounds);
} else {
- GenRegMemCheck(kCondUge, rl_index.low_reg, rl_array.low_reg,
+ GenRegMemCheck(kCondUge, rl_index.reg.GetReg(), rl_array.reg.GetReg(),
len_offset, kThrowArrayBounds);
}
}
@@ -1359,21 +1360,21 @@ void X86Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
rl_src = LoadValue(rl_src, reg_class);
}
// If the src reg can't be byte accessed, move it to a temp first.
- if ((size == kSignedByte || size == kUnsignedByte) && rl_src.low_reg >= 4) {
+ if ((size == kSignedByte || size == kUnsignedByte) && rl_src.reg.GetReg() >= 4) {
int temp = AllocTemp();
- OpRegCopy(temp, rl_src.low_reg);
- StoreBaseIndexedDisp(rl_array.low_reg, rl_index.low_reg, scale, data_offset, temp,
+ OpRegCopy(temp, rl_src.reg.GetReg());
+ StoreBaseIndexedDisp(rl_array.reg.GetReg(), rl_index.reg.GetReg(), scale, data_offset, temp,
INVALID_REG, size, INVALID_SREG);
} else {
- StoreBaseIndexedDisp(rl_array.low_reg, rl_index.low_reg, scale, data_offset, rl_src.low_reg,
- rl_src.high_reg, size, INVALID_SREG);
+ StoreBaseIndexedDisp(rl_array.reg.GetReg(), rl_index.reg.GetReg(), scale, data_offset, rl_src.reg.GetReg(),
+ rl_src.wide ? rl_src.reg.GetHighReg() : INVALID_REG, size, INVALID_SREG);
}
if (card_mark) {
// Free rl_index if it's a temp. Ensures there are 2 free regs for the card mark.
if (!constant_index) {
- FreeTemp(rl_index.low_reg);
+ FreeTemp(rl_index.reg.GetReg());
}
- MarkGCCard(rl_src.low_reg, rl_array.low_reg);
+ MarkGCCard(rl_src.reg.GetReg(), rl_array.reg.GetReg());
}
}
@@ -1385,52 +1386,52 @@ RegLocation X86Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation
case Instruction::SHL_LONG_2ADDR:
DCHECK_NE(shift_amount, 1); // Prevent a double store from happening.
if (shift_amount == 32) {
- OpRegCopy(rl_result.high_reg, rl_src.low_reg);
- LoadConstant(rl_result.low_reg, 0);
+ OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetReg());
+ LoadConstant(rl_result.reg.GetReg(), 0);
} else if (shift_amount > 31) {
- OpRegCopy(rl_result.high_reg, rl_src.low_reg);
- FreeTemp(rl_src.high_reg);
- NewLIR2(kX86Sal32RI, rl_result.high_reg, shift_amount - 32);
- LoadConstant(rl_result.low_reg, 0);
+ OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetReg());
+ FreeTemp(rl_src.reg.GetHighReg());
+ NewLIR2(kX86Sal32RI, rl_result.reg.GetHighReg(), shift_amount - 32);
+ LoadConstant(rl_result.reg.GetReg(), 0);
} else {
- OpRegCopy(rl_result.low_reg, rl_src.low_reg);
- OpRegCopy(rl_result.high_reg, rl_src.high_reg);
- NewLIR3(kX86Shld32RRI, rl_result.high_reg, rl_result.low_reg, shift_amount);
- NewLIR2(kX86Sal32RI, rl_result.low_reg, shift_amount);
+ OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg());
+ NewLIR3(kX86Shld32RRI, rl_result.reg.GetHighReg(), rl_result.reg.GetReg(), shift_amount);
+ NewLIR2(kX86Sal32RI, rl_result.reg.GetReg(), shift_amount);
}
break;
case Instruction::SHR_LONG:
case Instruction::SHR_LONG_2ADDR:
if (shift_amount == 32) {
- OpRegCopy(rl_result.low_reg, rl_src.high_reg);
- OpRegCopy(rl_result.high_reg, rl_src.high_reg);
- NewLIR2(kX86Sar32RI, rl_result.high_reg, 31);
+ OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetHighReg());
+ OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg());
+ NewLIR2(kX86Sar32RI, rl_result.reg.GetHighReg(), 31);
} else if (shift_amount > 31) {
- OpRegCopy(rl_result.low_reg, rl_src.high_reg);
- OpRegCopy(rl_result.high_reg, rl_src.high_reg);
- NewLIR2(kX86Sar32RI, rl_result.low_reg, shift_amount - 32);
- NewLIR2(kX86Sar32RI, rl_result.high_reg, 31);
+ OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetHighReg());
+ OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg());
+ NewLIR2(kX86Sar32RI, rl_result.reg.GetReg(), shift_amount - 32);
+ NewLIR2(kX86Sar32RI, rl_result.reg.GetHighReg(), 31);
} else {
- OpRegCopy(rl_result.low_reg, rl_src.low_reg);
- OpRegCopy(rl_result.high_reg, rl_src.high_reg);
- NewLIR3(kX86Shrd32RRI, rl_result.low_reg, rl_result.high_reg, shift_amount);
- NewLIR2(kX86Sar32RI, rl_result.high_reg, shift_amount);
+ OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg());
+ NewLIR3(kX86Shrd32RRI, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), shift_amount);
+ NewLIR2(kX86Sar32RI, rl_result.reg.GetHighReg(), shift_amount);
}
break;
case Instruction::USHR_LONG:
case Instruction::USHR_LONG_2ADDR:
if (shift_amount == 32) {
- OpRegCopy(rl_result.low_reg, rl_src.high_reg);
- LoadConstant(rl_result.high_reg, 0);
+ OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetHighReg());
+ LoadConstant(rl_result.reg.GetHighReg(), 0);
} else if (shift_amount > 31) {
- OpRegCopy(rl_result.low_reg, rl_src.high_reg);
- NewLIR2(kX86Shr32RI, rl_result.low_reg, shift_amount - 32);
- LoadConstant(rl_result.high_reg, 0);
+ OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetHighReg());
+ NewLIR2(kX86Shr32RI, rl_result.reg.GetReg(), shift_amount - 32);
+ LoadConstant(rl_result.reg.GetHighReg(), 0);
} else {
- OpRegCopy(rl_result.low_reg, rl_src.low_reg);
- OpRegCopy(rl_result.high_reg, rl_src.high_reg);
- NewLIR3(kX86Shrd32RRI, rl_result.low_reg, rl_result.high_reg, shift_amount);
- NewLIR2(kX86Shr32RI, rl_result.high_reg, shift_amount);
+ OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg());
+ NewLIR3(kX86Shrd32RRI, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), shift_amount);
+ NewLIR2(kX86Shr32RI, rl_result.reg.GetHighReg(), shift_amount);
}
break;
default:
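All three long-shift opcodes above split into the same three regimes: exactly 32 (a pure word move), more than 31 (one shift whose result lands wholly in a single word), and less than 32 (SHLD/SHRD to funnel bits across the word boundary). For SHL the decomposition is (illustrative; assumes 0 < amount < 64):

    #include <cstdint>
    static void ShlLong(uint32_t& lo, uint32_t& hi, int amount) {
      if (amount == 32) {
        hi = lo; lo = 0;                              // mov + zero
      } else if (amount > 31) {
        hi = lo << (amount - 32); lo = 0;             // one shift, high word only
      } else {
        hi = (hi << amount) | (lo >> (32 - amount));  // shld hi, lo, amount
        lo <<= amount;                                // sal lo, amount
      }
    }
    int main() {
      uint32_t lo = 0x80000001u, hi = 0;
      ShlLong(lo, hi, 4);
      return (hi == 0x8 && lo == 0x10) ? 0 : 1;
    }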
@@ -1567,7 +1568,7 @@ X86OpCode X86Mir2Lir::GetOpcode(Instruction::Code op, RegLocation loc, bool is_h
int32_t value) {
bool in_mem = loc.location != kLocPhysReg;
bool byte_imm = IS_SIMM8(value);
- DCHECK(in_mem || !IsFpReg(loc.low_reg));
+ DCHECK(in_mem || !IsFpReg(loc.reg.GetReg()));
switch (op) {
case Instruction::ADD_LONG:
case Instruction::ADD_LONG_2ADDR:
@@ -1647,15 +1648,15 @@ void X86Mir2Lir::GenLongImm(RegLocation rl_dest, RegLocation rl_src, Instruction
RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
DCHECK_EQ(rl_result.location, kLocPhysReg);
- DCHECK(!IsFpReg(rl_result.low_reg));
+ DCHECK(!IsFpReg(rl_result.reg.GetReg()));
if (!IsNoOp(op, val_lo)) {
X86OpCode x86op = GetOpcode(op, rl_result, false, val_lo);
- NewLIR2(x86op, rl_result.low_reg, val_lo);
+ NewLIR2(x86op, rl_result.reg.GetReg(), val_lo);
}
if (!IsNoOp(op, val_hi)) {
X86OpCode x86op = GetOpcode(op, rl_result, true, val_hi);
- NewLIR2(x86op, rl_result.high_reg, val_hi);
+ NewLIR2(x86op, rl_result.reg.GetHighReg(), val_hi);
}
StoreValueWide(rl_dest, rl_result);
}
@@ -1671,15 +1672,15 @@ void X86Mir2Lir::GenLongLongImm(RegLocation rl_dest, RegLocation rl_src1,
// Can we do this directly into the destination registers?
if (rl_dest.location == kLocPhysReg && rl_src1.location == kLocPhysReg &&
- rl_dest.low_reg == rl_src1.low_reg && rl_dest.high_reg == rl_src1.high_reg &&
- !IsFpReg(rl_dest.low_reg)) {
+ rl_dest.reg.GetReg() == rl_src1.reg.GetReg() && rl_dest.reg.GetHighReg() == rl_src1.reg.GetHighReg() &&
+ !IsFpReg(rl_dest.reg.GetReg())) {
if (!IsNoOp(op, val_lo)) {
X86OpCode x86op = GetOpcode(op, rl_dest, false, val_lo);
- NewLIR2(x86op, rl_dest.low_reg, val_lo);
+ NewLIR2(x86op, rl_dest.reg.GetReg(), val_lo);
}
if (!IsNoOp(op, val_hi)) {
X86OpCode x86op = GetOpcode(op, rl_dest, true, val_hi);
- NewLIR2(x86op, rl_dest.high_reg, val_hi);
+ NewLIR2(x86op, rl_dest.reg.GetHighReg(), val_hi);
}
StoreFinalValueWide(rl_dest, rl_dest);
@@ -1693,11 +1694,11 @@ void X86Mir2Lir::GenLongLongImm(RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_result = ForceTempWide(rl_src1);
if (!IsNoOp(op, val_lo)) {
X86OpCode x86op = GetOpcode(op, rl_result, false, val_lo);
- NewLIR2(x86op, rl_result.low_reg, val_lo);
+ NewLIR2(x86op, rl_result.reg.GetReg(), val_lo);
}
if (!IsNoOp(op, val_hi)) {
X86OpCode x86op = GetOpcode(op, rl_result, true, val_hi);
- NewLIR2(x86op, rl_result.high_reg, val_hi);
+ NewLIR2(x86op, rl_result.reg.GetHighReg(), val_hi);
}
StoreFinalValueWide(rl_dest, rl_result);
@@ -1709,17 +1710,17 @@ void X86Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx,
RegLocation rl_dest, RegLocation rl_src) {
RegLocation object = LoadValue(rl_src, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- int result_reg = rl_result.low_reg;
+ int result_reg = rl_result.reg.GetReg();
// SETcc only works with EAX..EDX.
- if (result_reg == object.low_reg || result_reg >= 4) {
+ if (result_reg == object.reg.GetReg() || result_reg >= 4) {
result_reg = AllocTypedTemp(false, kCoreReg);
DCHECK_LT(result_reg, 4);
}
// Assume that there is no match.
LoadConstant(result_reg, 0);
- LIR* null_branchover = OpCmpImmBranch(kCondEq, object.low_reg, 0, NULL);
+ LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg.GetReg(), 0, NULL);
int check_class = AllocTypedTemp(false, kCoreReg);
@@ -1730,11 +1731,11 @@ void X86Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx,
if (rl_method.location == kLocPhysReg) {
if (use_declaring_class) {
- LoadWordDisp(rl_method.low_reg,
+ LoadWordDisp(rl_method.reg.GetReg(),
mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
check_class);
} else {
- LoadWordDisp(rl_method.low_reg,
+ LoadWordDisp(rl_method.reg.GetReg(),
mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
check_class);
LoadWordDisp(check_class, offset_of_type, check_class);
@@ -1755,7 +1756,7 @@ void X86Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx,
// Compare the computed class to the class in the object.
DCHECK_EQ(object.location, kLocPhysReg);
- OpRegMem(kOpCmp, check_class, object.low_reg,
+ OpRegMem(kOpCmp, check_class, object.reg.GetReg(),
mirror::Object::ClassOffset().Int32Value());
// Set the low byte of the result to 0 or 1 from the compare condition code.
@@ -1765,7 +1766,7 @@ void X86Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx,
null_branchover->target = target;
FreeTemp(check_class);
if (IsTemp(result_reg)) {
- OpRegCopy(rl_result.low_reg, result_reg);
+ OpRegCopy(rl_result.reg.GetReg(), result_reg);
FreeTemp(result_reg);
}
StoreValue(rl_dest, rl_result);
@@ -1818,7 +1819,7 @@ void X86Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_k
RegLocation rl_result = GetReturn(false);
// SETcc only works with EAX..EDX.
- DCHECK_LT(rl_result.low_reg, 4);
+ DCHECK_LT(rl_result.reg.GetReg(), 4);
// Is the class NULL?
LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
@@ -1830,13 +1831,13 @@ void X86Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_k
LIR* branchover = nullptr;
if (type_known_final) {
// Ensure top 3 bytes of result are 0.
- LoadConstant(rl_result.low_reg, 0);
+ LoadConstant(rl_result.reg.GetReg(), 0);
OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));
// Set the low byte of the result to 0 or 1 from the compare condition code.
- NewLIR2(kX86Set8R, rl_result.low_reg, kX86CondEq);
+ NewLIR2(kX86Set8R, rl_result.reg.GetReg(), kX86CondEq);
} else {
if (!type_known_abstract) {
- LoadConstant(rl_result.low_reg, 1); // Assume result succeeds.
+ LoadConstant(rl_result.reg.GetReg(), 1); // Assume result succeeds.
branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
}
OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));
@@ -1964,7 +1965,7 @@ void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
rl_lhs = LoadValue(rl_lhs, kCoreReg);
rl_result = UpdateLoc(rl_dest);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegReg(op, rl_result.low_reg, rl_lhs.low_reg);
+ OpRegReg(op, rl_result.reg.GetReg(), rl_lhs.reg.GetReg());
} else {
if (shift_op) {
// X86 doesn't require masking and must use ECX.
@@ -1979,9 +1980,9 @@ void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
OpMemReg(op, rl_result, t_reg);
FreeTemp(t_reg);
return;
- } else if (!IsFpReg(rl_result.low_reg)) {
+ } else if (!IsFpReg(rl_result.reg.GetReg())) {
// Can do this directly into the result register
- OpRegReg(op, rl_result.low_reg, t_reg);
+ OpRegReg(op, rl_result.reg.GetReg(), t_reg);
FreeTemp(t_reg);
StoreFinalValue(rl_dest, rl_result);
return;
@@ -1990,7 +1991,7 @@ void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
// Three address form, or we can't do directly.
rl_lhs = LoadValue(rl_lhs, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegReg(op, rl_result.low_reg, rl_lhs.low_reg, t_reg);
+ OpRegRegReg(op, rl_result.reg.GetReg(), rl_lhs.reg.GetReg(), t_reg);
FreeTemp(t_reg);
} else {
// Multiply is 3 operand only (sort of).
@@ -2001,11 +2002,11 @@ void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
// Can we do this from memory directly?
rl_rhs = UpdateLoc(rl_rhs);
if (rl_rhs.location != kLocPhysReg) {
- OpRegMem(op, rl_result.low_reg, rl_rhs);
+ OpRegMem(op, rl_result.reg.GetReg(), rl_rhs);
StoreFinalValue(rl_dest, rl_result);
return;
- } else if (!IsFpReg(rl_rhs.low_reg)) {
- OpRegReg(op, rl_result.low_reg, rl_rhs.low_reg);
+ } else if (!IsFpReg(rl_rhs.reg.GetReg())) {
+ OpRegReg(op, rl_result.reg.GetReg(), rl_rhs.reg.GetReg());
StoreFinalValue(rl_dest, rl_result);
return;
}
@@ -2013,17 +2014,17 @@ void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
rl_rhs = LoadValue(rl_rhs, kCoreReg);
if (rl_result.location != kLocPhysReg) {
// Okay, we can do this into memory.
- OpMemReg(op, rl_result, rl_rhs.low_reg);
+ OpMemReg(op, rl_result, rl_rhs.reg.GetReg());
return;
- } else if (!IsFpReg(rl_result.low_reg)) {
+ } else if (!IsFpReg(rl_result.reg.GetReg())) {
// Can do this directly into the result register.
- OpRegReg(op, rl_result.low_reg, rl_rhs.low_reg);
+ OpRegReg(op, rl_result.reg.GetReg(), rl_rhs.reg.GetReg());
StoreFinalValue(rl_dest, rl_result);
return;
} else {
rl_lhs = LoadValue(rl_lhs, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegReg(op, rl_result.low_reg, rl_lhs.low_reg, rl_rhs.low_reg);
+ OpRegRegReg(op, rl_result.reg.GetReg(), rl_lhs.reg.GetReg(), rl_rhs.reg.GetReg());
}
} else {
// Try to use reg/memory instructions.
@@ -2035,34 +2036,34 @@ void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
rl_lhs = LoadValue(rl_lhs, kCoreReg);
rl_rhs = LoadValue(rl_rhs, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegReg(op, rl_result.low_reg, rl_lhs.low_reg, rl_rhs.low_reg);
+ OpRegRegReg(op, rl_result.reg.GetReg(), rl_lhs.reg.GetReg(), rl_rhs.reg.GetReg());
} else {
// We can optimize by moving to result and using memory operands.
if (rl_rhs.location != kLocPhysReg) {
// Force LHS into result.
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- LoadValueDirect(rl_lhs, rl_result.low_reg);
- OpRegMem(op, rl_result.low_reg, rl_rhs);
+ LoadValueDirect(rl_lhs, rl_result.reg.GetReg());
+ OpRegMem(op, rl_result.reg.GetReg(), rl_rhs);
} else if (rl_lhs.location != kLocPhysReg) {
// RHS is in a register; LHS is in memory.
if (op != kOpSub) {
// Force RHS into result and operate on memory.
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegCopy(rl_result.low_reg, rl_rhs.low_reg);
- OpRegMem(op, rl_result.low_reg, rl_lhs);
+ OpRegCopy(rl_result.reg.GetReg(), rl_rhs.reg.GetReg());
+ OpRegMem(op, rl_result.reg.GetReg(), rl_lhs);
} else {
// Subtraction isn't commutative.
rl_lhs = LoadValue(rl_lhs, kCoreReg);
rl_rhs = LoadValue(rl_rhs, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegReg(op, rl_result.low_reg, rl_lhs.low_reg, rl_rhs.low_reg);
+ OpRegRegReg(op, rl_result.reg.GetReg(), rl_lhs.reg.GetReg(), rl_rhs.reg.GetReg());
}
} else {
// Both are in registers.
rl_lhs = LoadValue(rl_lhs, kCoreReg);
rl_rhs = LoadValue(rl_rhs, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegReg(op, rl_result.low_reg, rl_lhs.low_reg, rl_rhs.low_reg);
+ OpRegRegReg(op, rl_result.reg.GetReg(), rl_lhs.reg.GetReg(), rl_rhs.reg.GetReg());
}
}
}
@@ -2073,10 +2074,10 @@ void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
bool X86Mir2Lir::IsOperationSafeWithoutTemps(RegLocation rl_lhs, RegLocation rl_rhs) {
// If we have non-core registers, then we can't do good things.
- if (rl_lhs.location == kLocPhysReg && IsFpReg(rl_lhs.low_reg)) {
+ if (rl_lhs.location == kLocPhysReg && IsFpReg(rl_lhs.reg.GetReg())) {
return false;
}
- if (rl_rhs.location == kLocPhysReg && IsFpReg(rl_rhs.low_reg)) {
+ if (rl_rhs.location == kLocPhysReg && IsFpReg(rl_rhs.reg.GetReg())) {
return false;
}
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index eea7191c3b..ef8be3cc61 100644
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -49,23 +49,19 @@ namespace art {
};
RegLocation X86Mir2Lir::LocCReturn() {
- RegLocation res = X86_LOC_C_RETURN;
- return res;
+ return x86_loc_c_return;
}
RegLocation X86Mir2Lir::LocCReturnWide() {
- RegLocation res = X86_LOC_C_RETURN_WIDE;
- return res;
+ return x86_loc_c_return_wide;
}
RegLocation X86Mir2Lir::LocCReturnFloat() {
- RegLocation res = X86_LOC_C_RETURN_FLOAT;
- return res;
+ return x86_loc_c_return_float;
}
RegLocation X86Mir2Lir::LocCReturnDouble() {
- RegLocation res = X86_LOC_C_RETURN_DOUBLE;
- return res;
+ return x86_loc_c_return_double;
}
// Return a target-dependent special register.
@@ -390,19 +386,19 @@ void X86Mir2Lir::ClobberCallerSave() {
RegLocation X86Mir2Lir::GetReturnWideAlt() {
RegLocation res = LocCReturnWide();
- CHECK(res.low_reg == rAX);
- CHECK(res.high_reg == rDX);
+ CHECK(res.reg.GetReg() == rAX);
+ CHECK(res.reg.GetHighReg() == rDX);
Clobber(rAX);
Clobber(rDX);
MarkInUse(rAX);
MarkInUse(rDX);
- MarkPair(res.low_reg, res.high_reg);
+ MarkPair(res.reg.GetReg(), res.reg.GetHighReg());
return res;
}
RegLocation X86Mir2Lir::GetReturnAlt() {
RegLocation res = LocCReturn();
- res.low_reg = rDX;
+ res.reg.SetReg(rDX);
Clobber(rDX);
MarkInUse(rDX);
return res;
@@ -430,27 +426,21 @@ void X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
NewLIR0(kX86Mfence);
#endif
}
-/*
- * Alloc a pair of core registers, or a double. Low reg in low byte,
- * high reg in next byte.
- */
-int X86Mir2Lir::AllocTypedTempPair(bool fp_hint,
- int reg_class) {
+
+// Alloc a pair of core registers, or a double.
+RegStorage X86Mir2Lir::AllocTypedTempWide(bool fp_hint, int reg_class) {
int high_reg;
int low_reg;
- int res = 0;
if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
low_reg = AllocTempDouble();
high_reg = low_reg; // only one allocated!
- res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
- return res;
+ // TODO: take advantage of 64-bit notation.
+ return RegStorage(RegStorage::k64BitPair, low_reg, high_reg);
}
-
low_reg = AllocTemp();
high_reg = AllocTemp();
- res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
- return res;
+ return RegStorage(RegStorage::k64BitPair, low_reg, high_reg);
}
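The rewrite from AllocTypedTempPair to AllocTypedTempWide retires the old convention of packing two 8-bit register ids into one int in favor of an explicit pair object. A minimal sketch of the two encodings side by side (the struct and field names here are illustrative, not the real art::RegStorage API):

    #include <cassert>
    #include <cstdint>
    struct PairSketch { uint8_t low, high; };  // stand-in for a k64BitPair
    // Old style: (low_reg & 0xff) | ((high_reg & 0xff) << 8).
    static int PackOld(int low, int high) {
      return (low & 0xff) | ((high & 0xff) << 8);
    }
    static PairSketch Unpack(int packed) {
      return PairSketch{(uint8_t)(packed & 0xff), (uint8_t)((packed >> 8) & 0xff)};
    }
    int main() {
      PairSketch p = Unpack(PackOld(0 /*rAX*/, 2 /*rDX*/));
      assert(p.low == 0 && p.high == 2);
      return 0;
    }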
int X86Mir2Lir::AllocTypedTemp(bool fp_hint, int reg_class) {
@@ -493,11 +483,11 @@ void X86Mir2Lir::CompilerInitializeRegAlloc() {
void X86Mir2Lir::FreeRegLocTemps(RegLocation rl_keep,
RegLocation rl_free) {
- if ((rl_free.low_reg != rl_keep.low_reg) && (rl_free.low_reg != rl_keep.high_reg) &&
- (rl_free.high_reg != rl_keep.low_reg) && (rl_free.high_reg != rl_keep.high_reg)) {
+ if ((rl_free.reg.GetReg() != rl_keep.reg.GetReg()) && (rl_free.reg.GetReg() != rl_keep.reg.GetHighReg()) &&
+ (rl_free.reg.GetHighReg() != rl_keep.reg.GetReg()) && (rl_free.reg.GetHighReg() != rl_keep.reg.GetHighReg())) {
// No overlap, free both
- FreeTemp(rl_free.low_reg);
- FreeTemp(rl_free.high_reg);
+ FreeTemp(rl_free.reg.GetReg());
+ FreeTemp(rl_free.reg.GetHighReg());
}
}
@@ -539,7 +529,8 @@ X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator*
: Mir2Lir(cu, mir_graph, arena),
method_address_insns_(arena, 100, kGrowableArrayMisc),
class_type_address_insns_(arena, 100, kGrowableArrayMisc),
- call_method_insns_(arena, 100, kGrowableArrayMisc) {
+ call_method_insns_(arena, 100, kGrowableArrayMisc),
+ stack_decrement_(nullptr), stack_increment_(nullptr) {
store_method_addr_used_ = false;
for (int i = 0; i < kX86Last; i++) {
if (X86Mir2Lir::EncodingMap[i].opcode != i) {
@@ -601,11 +592,11 @@ RegLocation X86Mir2Lir::UpdateLocWide(RegLocation loc) {
if (match) {
// We can reuse; update the register usage info.
- loc.low_reg = info_lo->reg;
- loc.high_reg = info_lo->reg; // Play nice with existing code.
loc.location = kLocPhysReg;
loc.vec_len = kVectorLength8;
- DCHECK(IsFpReg(loc.low_reg));
+ // TODO: use k64BitVector
+ loc.reg = RegStorage(RegStorage::k64BitPair, info_lo->reg, info_lo->reg);
+ DCHECK(IsFpReg(loc.reg.GetReg()));
return loc;
}
// We can't easily reuse; clobber and free any overlaps.
@@ -635,11 +626,10 @@ RegLocation X86Mir2Lir::UpdateLocWide(RegLocation loc) {
}
if (match) {
// Can reuse - update the register usage info
- loc.low_reg = info_lo->reg;
- loc.high_reg = info_hi->reg;
+ loc.reg = RegStorage(RegStorage::k64BitPair, info_lo->reg, info_hi->reg);
loc.location = kLocPhysReg;
- MarkPair(loc.low_reg, loc.high_reg);
- DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
+ MarkPair(loc.reg.GetReg(), loc.reg.GetHighReg());
+ DCHECK(!IsFpReg(loc.reg.GetReg()) || ((loc.reg.GetReg() & 0x1) == 0));
return loc;
}
// Can't easily reuse - clobber and free any overlaps
@@ -663,7 +653,6 @@ RegLocation X86Mir2Lir::UpdateLocWide(RegLocation loc) {
// TODO: Reunify with common code after 'pair mess' has been fixed
RegLocation X86Mir2Lir::EvalLocWide(RegLocation loc, int reg_class, bool update) {
DCHECK(loc.wide);
- int32_t new_regs;
int32_t low_reg;
int32_t high_reg;
@@ -671,38 +660,37 @@ RegLocation X86Mir2Lir::EvalLocWide(RegLocation loc, int reg_class, bool update)
/* If it is already in a register, we can assume proper form. Is it the right reg class? */
if (loc.location == kLocPhysReg) {
- DCHECK_EQ(IsFpReg(loc.low_reg), loc.IsVectorScalar());
- if (!RegClassMatches(reg_class, loc.low_reg)) {
+ DCHECK_EQ(IsFpReg(loc.reg.GetReg()), loc.IsVectorScalar());
+ if (!RegClassMatches(reg_class, loc.reg.GetReg())) {
/* It is the wrong register class. Reallocate and copy. */
- if (!IsFpReg(loc.low_reg)) {
+ if (!IsFpReg(loc.reg.GetReg())) {
// We want this in a FP reg, and it is in core registers.
DCHECK(reg_class != kCoreReg);
// Allocate this into any FP reg, and mark it with the right size.
low_reg = AllocTypedTemp(true, reg_class);
- OpVectorRegCopyWide(low_reg, loc.low_reg, loc.high_reg);
- CopyRegInfo(low_reg, loc.low_reg);
- Clobber(loc.low_reg);
- Clobber(loc.high_reg);
- loc.low_reg = low_reg;
- loc.high_reg = low_reg; // Play nice with existing code.
+ OpVectorRegCopyWide(low_reg, loc.reg.GetReg(), loc.reg.GetHighReg());
+ CopyRegInfo(low_reg, loc.reg.GetReg());
+ Clobber(loc.reg.GetReg());
+ Clobber(loc.reg.GetHighReg());
+ loc.reg.SetReg(low_reg);
+ loc.reg.SetHighReg(low_reg); // Play nice with existing code.
loc.vec_len = kVectorLength8;
} else {
// The value is in a FP register, and we want it in a pair of core registers.
DCHECK_EQ(reg_class, kCoreReg);
- DCHECK_EQ(loc.low_reg, loc.high_reg);
- new_regs = AllocTypedTempPair(false, kCoreReg); // Force to core registers.
- low_reg = new_regs & 0xff;
- high_reg = (new_regs >> 8) & 0xff;
+ DCHECK_EQ(loc.reg.GetReg(), loc.reg.GetHighReg());
+ RegStorage new_regs = AllocTypedTempWide(false, kCoreReg); // Force to core registers.
+ low_reg = new_regs.GetReg();
+ high_reg = new_regs.GetHighReg();
DCHECK_NE(low_reg, high_reg);
- OpRegCopyWide(low_reg, high_reg, loc.low_reg, loc.high_reg);
- CopyRegInfo(low_reg, loc.low_reg);
- CopyRegInfo(high_reg, loc.high_reg);
- Clobber(loc.low_reg);
- Clobber(loc.high_reg);
- loc.low_reg = low_reg;
- loc.high_reg = high_reg;
- MarkPair(loc.low_reg, loc.high_reg);
- DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
+ OpRegCopyWide(low_reg, high_reg, loc.reg.GetReg(), loc.reg.GetHighReg());
+ CopyRegInfo(low_reg, loc.reg.GetReg());
+ CopyRegInfo(high_reg, loc.reg.GetHighReg());
+ Clobber(loc.reg.GetReg());
+ Clobber(loc.reg.GetHighReg());
+ loc.reg = new_regs;
+ MarkPair(loc.reg.GetReg(), loc.reg.GetHighReg());
+ DCHECK(!IsFpReg(loc.reg.GetReg()) || ((loc.reg.GetReg() & 0x1) == 0));
}
}
return loc;
@@ -711,21 +699,20 @@ RegLocation X86Mir2Lir::EvalLocWide(RegLocation loc, int reg_class, bool update)
DCHECK_NE(loc.s_reg_low, INVALID_SREG);
DCHECK_NE(GetSRegHi(loc.s_reg_low), INVALID_SREG);
- new_regs = AllocTypedTempPair(loc.fp, reg_class);
- loc.low_reg = new_regs & 0xff;
- loc.high_reg = (new_regs >> 8) & 0xff;
+ loc.reg = AllocTypedTempWide(loc.fp, reg_class);
- if (loc.low_reg == loc.high_reg) {
- DCHECK(IsFpReg(loc.low_reg));
+ // FIXME: take advantage of RegStorage notation.
+ if (loc.reg.GetReg() == loc.reg.GetHighReg()) {
+ DCHECK(IsFpReg(loc.reg.GetReg()));
loc.vec_len = kVectorLength8;
} else {
- MarkPair(loc.low_reg, loc.high_reg);
+ MarkPair(loc.reg.GetReg(), loc.reg.GetHighReg());
}
if (update) {
loc.location = kLocPhysReg;
- MarkLive(loc.low_reg, loc.s_reg_low);
- if (loc.low_reg != loc.high_reg) {
- MarkLive(loc.high_reg, GetSRegHi(loc.s_reg_low));
+ MarkLive(loc.reg.GetReg(), loc.s_reg_low);
+ if (loc.reg.GetReg() != loc.reg.GetHighReg()) {
+ MarkLive(loc.reg.GetHighReg(), GetSRegHi(loc.s_reg_low));
}
}
return loc;
@@ -741,14 +728,14 @@ RegLocation X86Mir2Lir::EvalLoc(RegLocation loc, int reg_class, bool update) {
loc = UpdateLoc(loc);
if (loc.location == kLocPhysReg) {
- if (!RegClassMatches(reg_class, loc.low_reg)) {
+ if (!RegClassMatches(reg_class, loc.reg.GetReg())) {
/* Wrong register class. Realloc, copy and transfer ownership. */
new_reg = AllocTypedTemp(loc.fp, reg_class);
- OpRegCopy(new_reg, loc.low_reg);
- CopyRegInfo(new_reg, loc.low_reg);
- Clobber(loc.low_reg);
- loc.low_reg = new_reg;
- if (IsFpReg(loc.low_reg) && reg_class != kCoreReg)
+ OpRegCopy(new_reg, loc.reg.GetReg());
+ CopyRegInfo(new_reg, loc.reg.GetReg());
+ Clobber(loc.reg.GetReg());
+ loc.reg.SetReg(new_reg);
+ if (IsFpReg(loc.reg.GetReg()) && reg_class != kCoreReg)
loc.vec_len = kVectorLength4;
}
return loc;
@@ -756,14 +743,13 @@ RegLocation X86Mir2Lir::EvalLoc(RegLocation loc, int reg_class, bool update) {
DCHECK_NE(loc.s_reg_low, INVALID_SREG);
- new_reg = AllocTypedTemp(loc.fp, reg_class);
- loc.low_reg = new_reg;
- if (IsFpReg(loc.low_reg) && reg_class != kCoreReg)
+ loc.reg = RegStorage(RegStorage::k32BitSolo, AllocTypedTemp(loc.fp, reg_class));
+ if (IsFpReg(loc.reg.GetReg()) && reg_class != kCoreReg)
loc.vec_len = kVectorLength4;
if (update) {
loc.location = kLocPhysReg;
- MarkLive(loc.low_reg, loc.s_reg_low);
+ MarkLive(loc.reg.GetReg(), loc.s_reg_low);
}
return loc;
}
@@ -776,15 +762,15 @@ int X86Mir2Lir::AllocTempDouble() {
// TODO: Reunify with common code after 'pair mess' has been fixed
void X86Mir2Lir::ResetDefLocWide(RegLocation rl) {
DCHECK(rl.wide);
- RegisterInfo* p_low = IsTemp(rl.low_reg);
- if (IsFpReg(rl.low_reg)) {
+ RegisterInfo* p_low = IsTemp(rl.reg.GetReg());
+ if (IsFpReg(rl.reg.GetReg())) {
// We are using only the low register.
if (p_low && !(cu_->disable_opt & (1 << kSuppressLoads))) {
NullifyRange(p_low->def_start, p_low->def_end, p_low->s_reg, rl.s_reg_low);
}
- ResetDef(rl.low_reg);
+ ResetDef(rl.reg.GetReg());
} else {
- RegisterInfo* p_high = IsTemp(rl.high_reg);
+ RegisterInfo* p_high = IsTemp(rl.reg.GetHighReg());
if (p_low && !(cu_->disable_opt & (1 << kSuppressLoads))) {
DCHECK(p_low->pair);
NullifyRange(p_low->def_start, p_low->def_end, p_low->s_reg, rl.s_reg_low);
@@ -792,8 +778,8 @@ void X86Mir2Lir::ResetDefLocWide(RegLocation rl) {
if (p_high && !(cu_->disable_opt & (1 << kSuppressLoads))) {
DCHECK(p_high->pair);
}
- ResetDef(rl.low_reg);
- ResetDef(rl.high_reg);
+ ResetDef(rl.reg.GetReg());
+ ResetDef(rl.reg.GetHighReg());
}
}
@@ -832,8 +818,8 @@ void X86Mir2Lir::DumpRegLocation(RegLocation loc) {
<< (loc.high_word ? " h" : " ")
<< (loc.home ? " H" : " ")
<< " vec_len: " << loc.vec_len
- << ", low: " << static_cast<int>(loc.low_reg)
- << ", high: " << static_cast<int>(loc.high_reg)
+ << ", low: " << static_cast<int>(loc.reg.GetReg())
+ << ", high: " << static_cast<int>(loc.reg.GetHighReg())
<< ", s_reg: " << loc.s_reg_low
<< ", orig: " << loc.orig_sreg;
}
@@ -1036,8 +1022,8 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
// Runtime start index.
rl_start = UpdateLoc(rl_start);
if (rl_start.location == kLocPhysReg) {
- length_compare = OpCmpBranch(kCondLe, rCX, rl_start.low_reg, nullptr);
- OpRegReg(kOpSub, rCX, rl_start.low_reg);
+ length_compare = OpCmpBranch(kCondLe, rCX, rl_start.reg.GetReg(), nullptr);
+ OpRegReg(kOpSub, rCX, rl_start.reg.GetReg());
} else {
// Compare to memory to avoid a register load. Handle pushed EDI.
int displacement = SRegOffset(rl_start.s_reg_low) + sizeof(uint32_t);
@@ -1066,13 +1052,13 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
}
} else {
if (rl_start.location == kLocPhysReg) {
- if (rl_start.low_reg == rDI) {
+ if (rl_start.reg.GetReg() == rDI) {
// We have a slight problem here. We are already using RDI!
// Grab the value from the stack.
LoadWordDisp(rX86_SP, 0, rDX);
OpLea(rDI, rBX, rDX, 1, 0);
} else {
- OpLea(rDI, rBX, rl_start.low_reg, 1, 0);
+ OpLea(rDI, rBX, rl_start.reg.GetReg(), 1, 0);
}
} else {
OpRegCopy(rDI, rBX);
@@ -1094,14 +1080,14 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
// index = ((curr_ptr - orig_ptr) / 2) - 1.
OpRegReg(kOpSub, rDI, rBX);
OpRegImm(kOpAsr, rDI, 1);
- NewLIR3(kX86Lea32RM, rl_return.low_reg, rDI, -1);
+ NewLIR3(kX86Lea32RM, rl_return.reg.GetReg(), rDI, -1);
LIR *all_done = NewLIR1(kX86Jmp8, 0);
// Failed to match; return -1.
LIR *not_found = NewLIR0(kPseudoTargetLabel);
length_compare->target = not_found;
failed_branch->target = not_found;
- LoadConstantNoClobber(rl_return.low_reg, -1);
+ LoadConstantNoClobber(rl_return.reg.GetReg(), -1);
// And join up at the end.
all_done->target = NewLIR0(kPseudoTargetLabel);
@@ -1118,4 +1104,166 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
return true;
}
+/*
+ * @brief Enter a 32-bit quantity into the FDE buffer
+ * @param buf FDE buffer.
+ * @param data Data value.
+ */
+static void PushWord(std::vector<uint8_t>& buf, int data) {
+ buf.push_back(data & 0xff);
+ buf.push_back((data >> 8) & 0xff);
+ buf.push_back((data >> 16) & 0xff);
+ buf.push_back((data >> 24) & 0xff);
+}
+
+/*
+ * @brief Enter an 'advance LOC' into the FDE buffer
+ * @param buf FDE buffer.
+ * @param increment Amount by which to increase the current location.
+ */
+static void AdvanceLoc(std::vector<uint8_t>& buf, uint32_t increment) {
+ if (increment < 64) {
+ // Encoding in opcode.
+ buf.push_back(0x1 << 6 | increment);
+ } else if (increment < 256) {
+ // Single byte delta.
+ buf.push_back(0x02);
+ buf.push_back(increment);
+ } else if (increment < 256 * 256) {
+ // Two byte delta.
+ buf.push_back(0x03);
+ buf.push_back(increment & 0xff);
+ buf.push_back((increment >> 8) & 0xff);
+ } else {
+ // Four byte delta.
+ buf.push_back(0x04);
+ PushWord(buf, increment);
+ }
+}
+
+
+std::vector<uint8_t>* X86CFIInitialization() {
+ return X86Mir2Lir::ReturnCommonCallFrameInformation();
+}
+
+std::vector<uint8_t>* X86Mir2Lir::ReturnCommonCallFrameInformation() {
+ std::vector<uint8_t>* cfi_info = new std::vector<uint8_t>;
+
+ // Length of the CIE (except for this field).
+ PushWord(*cfi_info, 16);
+
+ // CIE id.
+ PushWord(*cfi_info, 0xFFFFFFFFU);
+
+ // Version: 3.
+ cfi_info->push_back(0x03);
+
+ // Augmentation: empty string.
+ cfi_info->push_back(0x0);
+
+ // Code alignment: 1.
+ cfi_info->push_back(0x01);
+
+ // Data alignment: -4.
+ cfi_info->push_back(0x7C);
+
+ // Return address register (R8).
+ cfi_info->push_back(0x08);
+
+ // Initial return PC is 4(ESP): DW_CFA_def_cfa R4 4.
+ cfi_info->push_back(0x0C);
+ cfi_info->push_back(0x04);
+ cfi_info->push_back(0x04);
+
+ // Return address location: 0(SP): DW_CFA_offset R8 1 (* -4).
+ cfi_info->push_back(0x2 << 6 | 0x08);
+ cfi_info->push_back(0x01);
+
+ // And 2 DW_CFA_nops to align to a 4-byte boundary.
+ cfi_info->push_back(0x0);
+ cfi_info->push_back(0x0);
+
+ DCHECK_EQ(cfi_info->size() & 3, 0U);
+ return cfi_info;
+}
+
+static void EncodeUnsignedLeb128(std::vector<uint8_t>& buf, uint32_t value) {
+ uint8_t buffer[12];
+ uint8_t *ptr = EncodeUnsignedLeb128(buffer, value);
+ for (uint8_t *p = buffer; p < ptr; p++) {
+ buf.push_back(*p);
+ }
+}
+
+std::vector<uint8_t>* X86Mir2Lir::ReturnCallFrameInformation() {
+ std::vector<uint8_t>* cfi_info = new std::vector<uint8_t>;
+
+ // Generate the FDE for the method.
+ DCHECK_NE(data_offset_, 0U);
+
+ // Length (will be filled in later in this routine).
+ PushWord(*cfi_info, 0);
+
+ // CIE_pointer (can be filled in by linker); might be left at 0 if there is only
+ // one CIE for the whole debug_frame section.
+ PushWord(*cfi_info, 0);
+
+ // 'initial_location' (filled in by linker).
+ PushWord(*cfi_info, 0);
+
+ // 'address_range' (number of bytes in the method).
+ PushWord(*cfi_info, data_offset_);
+
+ // The instructions in the FDE.
+ if (stack_decrement_ != nullptr) {
+ // Advance LOC to just past the stack decrement.
+ uint32_t pc = NEXT_LIR(stack_decrement_)->offset;
+ AdvanceLoc(*cfi_info, pc);
+
+ // Now update the offset to the call frame: DW_CFA_def_cfa_offset frame_size.
+ cfi_info->push_back(0x0e);
+ EncodeUnsignedLeb128(*cfi_info, frame_size_);
+
+ // We continue with that stack until the epilogue.
+ if (stack_increment_ != nullptr) {
+ uint32_t new_pc = NEXT_LIR(stack_increment_)->offset;
+ AdvanceLoc(*cfi_info, new_pc - pc);
+
+ // We probably have code snippets after the epilogue, so save the
+ // current state: DW_CFA_remember_state.
+ cfi_info->push_back(0x0a);
+
+ // We have now popped the stack: DW_CFA_def_cfa_offset 4. There is only the return
+ // PC on the stack now.
+ cfi_info->push_back(0x0e);
+ EncodeUnsignedLeb128(*cfi_info, 4);
+
+ // Everything after that is the same as before the epilogue.
+ // Stack bump was followed by RET instruction.
+ LIR *post_ret_insn = NEXT_LIR(NEXT_LIR(stack_increment_));
+ if (post_ret_insn != nullptr) {
+ pc = new_pc;
+ new_pc = post_ret_insn->offset;
+ AdvanceLoc(*cfi_info, new_pc - pc);
+ // Restore the state: DW_CFA_restore_state.
+ cfi_info->push_back(0x0b);
+ }
+ }
+ }
+
+ // Pad to a multiple of 4 bytes.
+ while ((cfi_info->size() & 3) != 0) {
+ // DW_CFA_nop is encoded as 0.
+ cfi_info->push_back(0);
+ }
+
+ // Set the length of the FDE inside the generated bytes.
+ uint32_t length = cfi_info->size() - 4;
+ (*cfi_info)[0] = length;
+ (*cfi_info)[1] = length >> 8;
+ (*cfi_info)[2] = length >> 16;
+ (*cfi_info)[3] = length >> 24;
+ return cfi_info;
+}
+
} // namespace art
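
The helpers above implement two standard DWARF encodings: PushWord emits a
little-endian 32-bit word, AdvanceLoc emits DW_CFA_advance_loc (folding deltas
below 64 into the opcode byte itself), and the EncodeUnsignedLeb128 wrapper
produces the variable-length ULEB128 operand used by DW_CFA_def_cfa_offset.
For reference, here is a minimal standalone sketch of both encodings; it is
illustrative only (EncodeUleb128 and the test values are not ART code):

#include <cstdint>
#include <cstdio>
#include <vector>

// Unsigned LEB128: 7 payload bits per byte, MSB set while more bytes follow.
static void EncodeUleb128(std::vector<uint8_t>& buf, uint32_t value) {
  do {
    uint8_t byte = value & 0x7f;
    value >>= 7;
    if (value != 0) {
      byte |= 0x80;  // Continuation bit.
    }
    buf.push_back(byte);
  } while (value != 0);
}

int main() {
  std::vector<uint8_t> buf;
  EncodeUleb128(buf, 624485);  // DWARF spec example; encodes as e5 8e 26.
  for (uint8_t b : buf) {
    printf("%02x ", b);
  }
  printf("\n");

  // DW_CFA_advance_loc with delta < 64 packs the delta into the opcode:
  // high two bits 0b01, low six bits the delta (cf. AdvanceLoc above).
  uint32_t delta = 12;
  printf("%02x\n", (0x1 << 6) | delta);  // Prints 4c.
  return 0;
}
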
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index 48a39bb5b4..d5d6b0e348 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -514,7 +514,7 @@ LIR* X86Mir2Lir::LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value) {
// We don't know the proper offset for the value, so pick one that will force
// 4 byte offset. We will fix this up in the assembler later to have the right
// value.
- res = LoadBaseDisp(rl_method.low_reg, 256 /* bogus */, r_dest_lo, kDouble, INVALID_SREG);
+ res = LoadBaseDisp(rl_method.reg.GetReg(), 256 /* bogus */, r_dest_lo, kDouble, INVALID_SREG);
res->target = data_target;
res->flags.fixup = kFixupLoad;
SetMemRefType(res, true, kLiteral);
@@ -714,7 +714,7 @@ LIR* X86Mir2Lir::StoreBaseIndexedDisp(int rBase, int r_index, int scale,
opcode = is_array ? kX86Mov8AR : kX86Mov8MR;
break;
default:
- LOG(FATAL) << "Bad case in LoadBaseIndexedDispBody";
+ LOG(FATAL) << "Bad case in StoreBaseIndexedDispBody";
}
if (!is_array) {
diff --git a/compiler/dex/quick/x86/x86_lir.h b/compiler/dex/quick/x86/x86_lir.h
index 4064bd6550..09cbbeec82 100644
--- a/compiler/dex/quick/x86/x86_lir.h
+++ b/compiler/dex/quick/x86/x86_lir.h
@@ -126,13 +126,6 @@ namespace art {
/* Mask to strip off fp flags */
#define X86_FP_REG_MASK 0xF
-// RegisterLocation templates return values (rAX, rAX/rDX or XMM0).
-// location, wide, defined, const, fp, core, ref, high_word, home, vec_len, low_reg, high_reg, s_reg_low
-#define X86_LOC_C_RETURN {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, rAX, INVALID_REG, INVALID_SREG, INVALID_SREG}
-#define X86_LOC_C_RETURN_WIDE {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, rAX, rDX, INVALID_SREG, INVALID_SREG}
-#define X86_LOC_C_RETURN_FLOAT {kLocPhysReg, 0, 0, 0, 1, 0, 0, 0, 1, kVectorLength4, fr0, INVALID_REG, INVALID_SREG, INVALID_SREG}
-#define X86_LOC_C_RETURN_DOUBLE {kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1, kVectorLength8, fr0, fr0, INVALID_SREG, INVALID_SREG}
-
enum X86ResourceEncodingPos {
kX86GPReg0 = 0,
kX86RegSP = 4,
@@ -211,6 +204,22 @@ enum X86NativeRegisterPool {
#define rX86_COUNT rCX
#define rX86_PC INVALID_REG
+// RegisterLocation templates for return values (rAX, rAX/rDX, or XMM0).
+const RegLocation x86_loc_c_return
+ {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
+ RegStorage(RegStorage::k32BitSolo, rAX), INVALID_SREG, INVALID_SREG};
+const RegLocation x86_loc_c_return_wide
+ {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
+ RegStorage(RegStorage::k64BitPair, rAX, rDX), INVALID_SREG, INVALID_SREG};
+// TODO: update to use k32BitVector (must encode in 7 bits, including fp flag).
+const RegLocation x86_loc_c_return_float
+ {kLocPhysReg, 0, 0, 0, 1, 0, 0, 0, 1, kVectorLength4,
+ RegStorage(RegStorage::k32BitSolo, fr0), INVALID_SREG, INVALID_SREG};
+// TODO: update to use k64BitVector (must encode in 7 bits, including fp flag).
+const RegLocation x86_loc_c_return_double
+ {kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1, kVectorLength8,
+ RegStorage(RegStorage::k64BitPair, fr0, fr0), INVALID_SREG, INVALID_SREG};
+
/*
* The following enum defines the list of supported X86 instructions by the
* assembler. Their corresponding EncodingMap positions will be defined in
diff --git a/compiler/dex/reg_storage.h b/compiler/dex/reg_storage.h
new file mode 100644
index 0000000000..c59617edea
--- /dev/null
+++ b/compiler/dex/reg_storage.h
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_REG_STORAGE_H_
+#define ART_COMPILER_DEX_REG_STORAGE_H_
+
+
+namespace art {
+
+/*
+ * Representation of the physical register, register pair or vector holding a Dalvik value.
+ * The basic configuration of the storage (i.e. solo reg, pair, vector) is common across all
+ * targets, but the encoding of the actual storage element is target-specific.
+ *
+ * The two most-significant bits describe the basic shape of the storage, while the meaning
+ * of the lower 14 bits depends on the shape:
+ *
+ * [PW]
+ * P: 0 -> pair, 1 -> solo (or vector)
+ * W: 1 -> 64 bits, 0 -> 32 bits
+ *
+ * [00] [xxxxxxxxxxxxxx] Invalid (typically all zeros)
+ * [01] [HHHHHHH] [LLLLLLL] 64-bit storage, composed of 2 32-bit registers
+ * [10] [0] [xxxxxx] [RRRRRRR] 32-bit solo register
+ * [11] [0] [xxxxxx] [RRRRRRR] 64-bit solo register
+ * [10] [1] [xxxxxx] [VVVVVVV] 32-bit vector storage
+ * [11] [1] [xxxxxx] [VVVVVVV] 64-bit vector storage
+ *
+ * x - don't care
+ * L - low register number of a pair
+ * H - high register number of a pair
+ * R - register number of a solo reg
+ * V - vector description
+ *
+ * Note that in all non-invalid cases, the low 7 bits must be sufficient to describe
+ * whether the storage element is floating point (see IsFloatReg()).
+ *
+ */
+
+class RegStorage {
+ public:
+ enum RegStorageKind {
+ kInvalid = 0x0000,
+ k64BitPair = 0x4000,
+ k32BitSolo = 0x8000,
+ k64BitSolo = 0xc000,
+ k32BitVector = 0xa000,
+ k64BitVector = 0xe000,
+ kPairMask = 0x8000,
+ kPair = 0x0000,
+ kSizeMask = 0x4000,
+ k64Bit = 0x4000,
+ k32Bit = 0x0000,
+ kVectorMask = 0xa000,
+ kVector = 0xa000,
+ kSolo = 0x8000,
+ kShapeMask = 0xc000,
+ kKindMask = 0xe000
+ };
+
+ static const uint16_t kRegValMask = 0x007f;
+ static const uint16_t kHighRegShift = 7;
+ static const uint16_t kHighRegMask = kRegValMask << kHighRegShift;
+
+ RegStorage(RegStorageKind rs_kind, int reg) {
+ DCHECK_NE(rs_kind & kShapeMask, kInvalid);
+ DCHECK_NE(rs_kind & kShapeMask, k64BitPair);
+ DCHECK_EQ(rs_kind & ~kKindMask, 0);
+ DCHECK_EQ(reg & ~kRegValMask, 0);
+ reg_ = rs_kind | reg;
+ }
+ RegStorage(RegStorageKind rs_kind, int low_reg, int high_reg) {
+ DCHECK_EQ(rs_kind, k64BitPair);
+ DCHECK_EQ(low_reg & ~kRegValMask, 0);
+ DCHECK_EQ(high_reg & ~kRegValMask, 0);
+ reg_ = rs_kind | (high_reg << kHighRegShift) | low_reg;
+ }
+ explicit RegStorage(uint16_t val) : reg_(val) {}
+ RegStorage() : reg_(kInvalid) {}
+ ~RegStorage() {}
+
+ bool IsInvalid() const {
+ return ((reg_ & kShapeMask) == kInvalid);
+ }
+
+ bool Is32Bit() const {
+ DCHECK(!IsInvalid());
+ return ((reg_ & kSizeMask) == k32Bit);
+ }
+
+ bool Is64Bit() const {
+ DCHECK(!IsInvalid());
+ return ((reg_ & kSizeMask) == k64Bit);
+ }
+
+ bool IsPair() const {
+ DCHECK(!IsInvalid());
+ return ((reg_ & kPairMask) == kPair);
+ }
+
+ bool IsSolo() const {
+ DCHECK(!IsInvalid());
+ return ((reg_ & kVectorMask) == kSolo);
+ }
+
+ bool IsVector() const {
+ DCHECK(!IsInvalid());
+ return ((reg_ & kVectorMask) == kVector);
+ }
+
+ // Used to retrieve either the low register of a pair, or the only register.
+ int GetReg() const {
+ DCHECK(!IsInvalid());
+ return (reg_ & kRegValMask);
+ }
+
+ void SetReg(int reg) {
+ DCHECK(!IsInvalid());
+ reg_ = (reg_ & ~kRegValMask) | reg;
+ DCHECK_EQ(GetReg(), reg);
+ }
+
+ // Retrieve the most significant register of a pair.
+ int GetHighReg() const {
+ DCHECK(IsPair());
+ return (reg_ & kHighRegMask) >> kHighRegShift;
+ }
+
+ void SetHighReg(int reg) {
+ DCHECK(IsPair());
+ reg_ = (reg_ & ~kHighRegMask) | (reg << kHighRegShift);
+ DCHECK_EQ(GetHighReg(), reg);
+ }
+
+ int GetRawBits() const {
+ return reg_;
+ }
+
+ private:
+ uint16_t reg_;
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_DEX_REG_STORAGE_H_
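
To make the encoding table above concrete, the following standalone sketch
reproduces the pair and solo layouts with raw masks (mirroring kRegValMask,
kHighRegShift and the shape constants). The register numbers are arbitrary,
and the sketch is an illustration of this header, not part of the commit:

#include <cassert>
#include <cstdint>

int main() {
  // 64-bit pair: shape bits [01], high reg in bits 13..7, low reg in bits 6..0.
  // Equivalent to RegStorage(RegStorage::k64BitPair, 1, 2): 0x4000 | (2 << 7) | 1.
  uint16_t pair = 0x4000 | (2 << 7) | 1;  // == 0x4101
  assert((pair & 0x007f) == 1);           // GetReg(): low register of the pair.
  assert(((pair & 0x3f80) >> 7) == 2);    // GetHighReg(): high register.

  // 32-bit solo: shape bits [10], register number in the low 7 bits.
  // Equivalent to RegStorage(RegStorage::k32BitSolo, 5): 0x8000 | 5.
  uint16_t solo = 0x8000 | 5;
  assert((solo & 0x8000) != 0);           // kPairMask set, so IsPair() is false.
  assert((solo & 0x4000) == 0);           // kSizeMask clear, so Is32Bit() is true.
  return 0;
}
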
diff --git a/compiler/dex/vreg_analysis.cc b/compiler/dex/vreg_analysis.cc
index f8dc223af7..4d2c05166b 100644
--- a/compiler/dex/vreg_analysis.cc
+++ b/compiler/dex/vreg_analysis.cc
@@ -379,16 +379,14 @@ void MIRGraph::DumpRegLocTable(RegLocation* table, int count) {
Mir2Lir* cg = static_cast<Mir2Lir*>(cu_->cg.get());
if (cg != NULL) {
for (int i = 0; i < count; i++) {
- LOG(INFO) << StringPrintf("Loc[%02d] : %s, %c %c %c %c %c %c %c%d %c%d S%d",
+ LOG(INFO) << StringPrintf("Loc[%02d] : %s, %c %c %c %c %c %c 0x%04x S%d",
table[i].orig_sreg, storage_name[table[i].location],
table[i].wide ? 'W' : 'N', table[i].defined ? 'D' : 'U',
table[i].fp ? 'F' : table[i].ref ? 'R' :'C',
table[i].is_const ? 'c' : 'n',
table[i].high_word ? 'H' : 'L', table[i].home ? 'h' : 't',
- cg->IsFpReg(table[i].low_reg) ? 's' : 'r',
- table[i].low_reg & cg->FpRegMask(),
- cg->IsFpReg(table[i].high_reg) ? 's' : 'r',
- table[i].high_reg & cg->FpRegMask(), table[i].s_reg_low);
+ table[i].reg.GetRawBits(),
+ table[i].s_reg_low);
}
} else {
// Either pre-regalloc or Portable.
@@ -404,9 +402,9 @@ void MIRGraph::DumpRegLocTable(RegLocation* table, int count) {
}
}
-static const RegLocation fresh_loc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0,
- kVectorNotUsed, INVALID_REG, INVALID_REG, INVALID_SREG,
- INVALID_SREG};
+// FIXME - will likely need to revisit all uses of this.
+static const RegLocation fresh_loc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0, kVectorNotUsed,
+ RegStorage(), INVALID_SREG, INVALID_SREG};
void MIRGraph::InitRegLocations() {
/* Allocate the location map */
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
new file mode 100644
index 0000000000..d401398ca4
--- /dev/null
+++ b/compiler/driver/compiler_driver-inl.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DRIVER_COMPILER_DRIVER_INL_H_
+#define ART_COMPILER_DRIVER_COMPILER_DRIVER_INL_H_
+
+#include "compiler_driver.h"
+#include "dex/compiler_ir.h"
+#include "mirror/art_field.h"
+#include "mirror/art_field-inl.h"
+#include "mirror/class_loader.h"
+#include "mirror/dex_cache.h"
+#include "mirror/art_field-inl.h"
+#include "scoped_thread_state_change.h"
+
+namespace art {
+
+inline mirror::DexCache* CompilerDriver::GetDexCache(const DexCompilationUnit* mUnit) {
+ return mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile());
+}
+
+inline mirror::ClassLoader* CompilerDriver::GetClassLoader(ScopedObjectAccess& soa,
+ const DexCompilationUnit* mUnit) {
+ return soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader());
+}
+
+inline mirror::Class* CompilerDriver::ResolveCompilingMethodsClass(
+ ScopedObjectAccess& soa, const SirtRef<mirror::DexCache>& dex_cache,
+ const SirtRef<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit) {
+ DCHECK(dex_cache->GetDexFile() == mUnit->GetDexFile());
+ DCHECK(class_loader.get() == soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
+ const DexFile::MethodId& referrer_method_id =
+ mUnit->GetDexFile()->GetMethodId(mUnit->GetDexMethodIndex());
+ mirror::Class* referrer_class = mUnit->GetClassLinker()->ResolveType(
+ *mUnit->GetDexFile(), referrer_method_id.class_idx_, dex_cache, class_loader);
+ DCHECK_EQ(referrer_class == nullptr, soa.Self()->IsExceptionPending());
+ if (UNLIKELY(referrer_class == nullptr)) {
+ // Clean up any exception left by type resolution.
+ soa.Self()->ClearException();
+ }
+ return referrer_class;
+}
+
+inline mirror::ArtField* CompilerDriver::ResolveField(
+ ScopedObjectAccess& soa, const SirtRef<mirror::DexCache>& dex_cache,
+ const SirtRef<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
+ uint32_t field_idx, bool is_static) {
+ DCHECK(dex_cache->GetDexFile() == mUnit->GetDexFile());
+ DCHECK(class_loader.get() == soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
+ mirror::ArtField* resolved_field = mUnit->GetClassLinker()->ResolveField(
+ *mUnit->GetDexFile(), field_idx, dex_cache, class_loader, is_static);
+ DCHECK_EQ(resolved_field == nullptr, soa.Self()->IsExceptionPending());
+ if (UNLIKELY(resolved_field == nullptr)) {
+ // Clean up any exception left by type resolution.
+ soa.Self()->ClearException();
+ return nullptr;
+ }
+ if (UNLIKELY(resolved_field->IsStatic() != is_static)) {
+ // ClassLinker can return a field of the wrong kind directly from the DexCache.
+ // Silently return nullptr on such incompatible class change.
+ return nullptr;
+ }
+ return resolved_field;
+}
+
+inline void CompilerDriver::GetResolvedFieldDexFileLocation(
+ mirror::ArtField* resolved_field, const DexFile** declaring_dex_file,
+ uint16_t* declaring_class_idx, uint16_t* declaring_field_idx) {
+ mirror::Class* declaring_class = resolved_field->GetDeclaringClass();
+ *declaring_dex_file = declaring_class->GetDexCache()->GetDexFile();
+ *declaring_class_idx = declaring_class->GetDexTypeIndex();
+ *declaring_field_idx = resolved_field->GetDexFieldIndex();
+}
+
+inline bool CompilerDriver::IsFieldVolatile(mirror::ArtField* field) {
+ return field->IsVolatile();
+}
+
+inline std::pair<bool, bool> CompilerDriver::IsFastInstanceField(
+ mirror::DexCache* dex_cache, mirror::Class* referrer_class,
+ mirror::ArtField* resolved_field, uint16_t field_idx, MemberOffset* field_offset) {
+ DCHECK(!resolved_field->IsStatic());
+ mirror::Class* fields_class = resolved_field->GetDeclaringClass();
+ bool fast_get = referrer_class != nullptr &&
+ referrer_class->CanAccessResolvedField(fields_class, resolved_field,
+ dex_cache, field_idx);
+ bool fast_put = fast_get && (!resolved_field->IsFinal() || fields_class == referrer_class);
+ *field_offset = fast_get ? resolved_field->GetOffset() : MemberOffset(0u);
+ return std::make_pair(fast_get, fast_put);
+}
+
+inline std::pair<bool, bool> CompilerDriver::IsFastStaticField(
+ mirror::DexCache* dex_cache, mirror::Class* referrer_class,
+ mirror::ArtField* resolved_field, uint16_t field_idx, MemberOffset* field_offset,
+ uint32_t* storage_index, bool* is_referrers_class, bool* is_initialized) {
+ DCHECK(resolved_field->IsStatic());
+ if (LIKELY(referrer_class != nullptr)) {
+ mirror::Class* fields_class = resolved_field->GetDeclaringClass();
+ if (fields_class == referrer_class) {
+ *field_offset = resolved_field->GetOffset();
+ *storage_index = fields_class->GetDexTypeIndex();
+ *is_referrers_class = true; // implies no need to worry about class initialization
+ *is_initialized = true;
+ return std::make_pair(true, true);
+ }
+ if (referrer_class->CanAccessResolvedField(fields_class, resolved_field,
+ dex_cache, field_idx)) {
+ // We have the resolved field; we must turn it into an index for the referrer
+ // in its static storage (which may fail if it doesn't have a slot for it).
+ // TODO: for images we can elide the static storage base null check
+ // if we know there's a non-null entry in the image
+ const DexFile* dex_file = dex_cache->GetDexFile();
+ uint32_t storage_idx = DexFile::kDexNoIndex;
+ if (LIKELY(fields_class->GetDexCache() == dex_cache)) {
+ // Common case where the dex caches of the referrer and the field are the same,
+ // so there is no need to search the dex file.
+ storage_idx = fields_class->GetDexTypeIndex();
+ } else {
+ // Search dex file for localized ssb index, may fail if field's class is a parent
+ // of the class mentioned in the dex file and there is no dex cache entry.
+ const DexFile::StringId* string_id =
+ dex_file->FindStringId(FieldHelper(resolved_field).GetDeclaringClassDescriptor());
+ if (string_id != nullptr) {
+ const DexFile::TypeId* type_id =
+ dex_file->FindTypeId(dex_file->GetIndexForStringId(*string_id));
+ if (type_id != nullptr) {
+ // Medium path: needs a check that the static storage base is initialized.
+ storage_idx = dex_file->GetIndexForTypeId(*type_id);
+ }
+ }
+ }
+ if (storage_idx != DexFile::kDexNoIndex) {
+ *field_offset = resolved_field->GetOffset();
+ *storage_index = storage_idx;
+ *is_referrers_class = false;
+ *is_initialized = fields_class->IsInitialized() &&
+ CanAssumeTypeIsPresentInDexCache(*dex_file, storage_idx);
+ return std::make_pair(true, !resolved_field->IsFinal());
+ }
+ }
+ }
+ // Conservative defaults.
+ *field_offset = MemberOffset(0u);
+ *storage_index = DexFile::kDexNoIndex;
+ *is_referrers_class = false;
+ *is_initialized = false;
+ return std::make_pair(false, false);
+}
+
+} // namespace art
+
+#endif // ART_COMPILER_DRIVER_COMPILER_DRIVER_INL_H_
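
IsFastInstanceField above condenses the access rules into a (fast_get,
fast_put) pair: a put can only be fast-pathed if the get is, and a write to a
final field is additionally only fast-pathed from the declaring class itself.
A standalone sketch of that decision, using invented stand-in flags rather
than the ART mirror types:

#include <cstdio>
#include <utility>

struct FieldInfo {
  bool accessible_from_referrer;  // Stand-in for CanAccessResolvedField().
  bool is_final;
  bool declared_in_referrer;      // Stand-in for fields_class == referrer_class.
};

static std::pair<bool, bool> FastPathSketch(const FieldInfo& f) {
  bool fast_get = f.accessible_from_referrer;
  // Final fields may only be written from their declaring class.
  bool fast_put = fast_get && (!f.is_final || f.declared_in_referrer);
  return std::make_pair(fast_get, fast_put);
}

int main() {
  FieldInfo final_field_elsewhere = {true, true, false};
  std::pair<bool, bool> fast = FastPathSketch(final_field_elsewhere);
  printf("fast_get=%d fast_put=%d\n", fast.first, fast.second);  // 1 0
  return 0;
}
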
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 1b284de9cc..fc22addbf1 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -26,11 +26,13 @@
#include "base/timing_logger.h"
#include "class_linker.h"
#include "compiler_backend.h"
+#include "compiler_driver-inl.h"
#include "dex_compilation_unit.h"
#include "dex_file-inl.h"
#include "dex/verification_results.h"
#include "dex/verified_method.h"
#include "dex/quick/dex_file_method_inliner.h"
+#include "driver/compiler_options.h"
#include "jni_internal.h"
#include "object_utils.h"
#include "runtime.h"
@@ -323,10 +325,12 @@ CompilerDriver::CompilerDriver(const CompilerOptions* compiler_options,
compiler_enable_auto_elf_loading_(NULL),
compiler_get_method_code_addr_(NULL),
support_boot_image_fixup_(instruction_set != kMips),
+ cfi_info_(nullptr),
dedupe_code_("dedupe code"),
dedupe_mapping_table_("dedupe mapping table"),
dedupe_vmap_table_("dedupe vmap table"),
- dedupe_gc_map_("dedupe gc map") {
+ dedupe_gc_map_("dedupe gc map"),
+ dedupe_cfi_info_("dedupe cfi info") {
DCHECK(compiler_options_ != nullptr);
DCHECK(verification_results_ != nullptr);
DCHECK(method_inliner_map_ != nullptr);
@@ -341,6 +345,11 @@ CompilerDriver::CompilerDriver(const CompilerOptions* compiler_options,
if (!image_) {
CHECK(image_classes_.get() == NULL);
}
+
+ // Are we generating CFI information?
+ if (compiler_options->GetGenerateGDBInformation()) {
+ cfi_info_.reset(compiler_backend_->GetCallFrameInformationInitialization(*this));
+ }
}
std::vector<uint8_t>* CompilerDriver::DeduplicateCode(const std::vector<uint8_t>& code) {
@@ -359,6 +368,13 @@ std::vector<uint8_t>* CompilerDriver::DeduplicateGCMap(const std::vector<uint8_t
return dedupe_gc_map_.Add(Thread::Current(), code);
}
+std::vector<uint8_t>* CompilerDriver::DeduplicateCFIInfo(const std::vector<uint8_t>* cfi_info) {
+ if (cfi_info == nullptr) {
+ return nullptr;
+ }
+ return dedupe_cfi_info_.Add(Thread::Current(), *cfi_info);
+}
+
CompilerDriver::~CompilerDriver() {
Thread* self = Thread::Current();
{
@@ -424,6 +440,11 @@ const std::vector<uint8_t>* CompilerDriver::CreatePortableToInterpreterBridge()
PORTABLE_ENTRYPOINT_OFFSET(pPortableToInterpreterBridge));
}
+const std::vector<uint8_t>* CompilerDriver::CreateQuickGenericJniTrampoline() const {
+ return CreateTrampoline(instruction_set_, kQuickAbi,
+ QUICK_ENTRYPOINT_OFFSET(pQuickGenericJniTrampoline));
+}
+
const std::vector<uint8_t>* CompilerDriver::CreateQuickImtConflictTrampoline() const {
return CreateTrampoline(instruction_set_, kQuickAbi,
QUICK_ENTRYPOINT_OFFSET(pQuickImtConflictTrampoline));
@@ -441,11 +462,11 @@ const std::vector<uint8_t>* CompilerDriver::CreateQuickToInterpreterBridge() con
void CompilerDriver::CompileAll(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
- TimingLogger& timings) {
+ TimingLogger* timings) {
DCHECK(!Runtime::Current()->IsStarted());
UniquePtr<ThreadPool> thread_pool(new ThreadPool("Compiler driver thread pool", thread_count_ - 1));
- PreCompile(class_loader, dex_files, *thread_pool.get(), timings);
- Compile(class_loader, dex_files, *thread_pool.get(), timings);
+ PreCompile(class_loader, dex_files, thread_pool.get(), timings);
+ Compile(class_loader, dex_files, thread_pool.get(), timings);
if (dump_stats_) {
stats_->Dump();
}
@@ -483,7 +504,7 @@ static DexToDexCompilationLevel GetDexToDexCompilationlevel(
}
}
-void CompilerDriver::CompileOne(mirror::ArtMethod* method, TimingLogger& timings) {
+void CompilerDriver::CompileOne(mirror::ArtMethod* method, TimingLogger* timings) {
DCHECK(!Runtime::Current()->IsStarted());
Thread* self = Thread::Current();
jobject jclass_loader;
@@ -510,7 +531,7 @@ void CompilerDriver::CompileOne(mirror::ArtMethod* method, TimingLogger& timings
dex_files.push_back(dex_file);
UniquePtr<ThreadPool> thread_pool(new ThreadPool("Compiler driver thread pool", 0U));
- PreCompile(jclass_loader, dex_files, *thread_pool.get(), timings);
+ PreCompile(jclass_loader, dex_files, thread_pool.get(), timings);
// Can we run DEX-to-DEX compiler on this class ?
DexToDexCompilationLevel dex_to_dex_compilation_level = kDontDexToDexCompile;
@@ -531,7 +552,7 @@ void CompilerDriver::CompileOne(mirror::ArtMethod* method, TimingLogger& timings
}
void CompilerDriver::Resolve(jobject class_loader, const std::vector<const DexFile*>& dex_files,
- ThreadPool& thread_pool, TimingLogger& timings) {
+ ThreadPool* thread_pool, TimingLogger* timings) {
for (size_t i = 0; i != dex_files.size(); ++i) {
const DexFile* dex_file = dex_files[i];
CHECK(dex_file != NULL);
@@ -540,7 +561,7 @@ void CompilerDriver::Resolve(jobject class_loader, const std::vector<const DexFi
}
void CompilerDriver::PreCompile(jobject class_loader, const std::vector<const DexFile*>& dex_files,
- ThreadPool& thread_pool, TimingLogger& timings) {
+ ThreadPool* thread_pool, TimingLogger* timings) {
LoadImageClasses(timings);
Resolve(class_loader, dex_files, thread_pool, timings);
@@ -625,13 +646,13 @@ static bool RecordImageClassesVisitor(mirror::Class* klass, void* arg)
}
// Make a list of descriptors for classes to include in the image
-void CompilerDriver::LoadImageClasses(TimingLogger& timings)
+void CompilerDriver::LoadImageClasses(TimingLogger* timings)
LOCKS_EXCLUDED(Locks::mutator_lock_) {
if (!IsImage()) {
return;
}
- timings.NewSplit("LoadImageClasses");
+ timings->NewSplit("LoadImageClasses");
// Make a first class to load all classes explicitly listed in the file
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
@@ -713,9 +734,9 @@ void CompilerDriver::FindClinitImageClassesCallback(mirror::Object* object, void
MaybeAddToImageClasses(object->GetClass(), compiler_driver->image_classes_.get());
}
-void CompilerDriver::UpdateImageClasses(TimingLogger& timings) {
+void CompilerDriver::UpdateImageClasses(TimingLogger* timings) {
if (IsImage()) {
- timings.NewSplit("UpdateImageClasses");
+ timings->NewSplit("UpdateImageClasses");
// Update image_classes_ with classes for objects created by <clinit> methods.
Thread* self = Thread::Current();
@@ -886,6 +907,24 @@ bool CompilerDriver::CanEmbedTypeInCode(const DexFile& dex_file, uint32_t type_i
}
}
+void CompilerDriver::ProcessedInstanceField(bool resolved) {
+ if (!resolved) {
+ stats_->UnresolvedInstanceField();
+ } else {
+ stats_->ResolvedInstanceField();
+ }
+}
+
+void CompilerDriver::ProcessedStaticField(bool resolved, bool local) {
+ if (!resolved) {
+ stats_->UnresolvedStaticField();
+ } else if (local) {
+ stats_->ResolvedLocalStaticField();
+ } else {
+ stats_->ResolvedStaticField();
+ }
+}
+
static mirror::Class* ComputeCompilingMethodsClass(ScopedObjectAccess& soa,
SirtRef<mirror::DexCache>& dex_cache,
const DexCompilationUnit* mUnit)
@@ -903,15 +942,6 @@ static mirror::Class* ComputeCompilingMethodsClass(ScopedObjectAccess& soa,
dex_cache, class_loader);
}
-static mirror::ArtField* ComputeFieldReferencedFromCompilingMethod(
- ScopedObjectAccess& soa, const DexCompilationUnit* mUnit, uint32_t field_idx, bool is_static)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- SirtRef<mirror::DexCache> dex_cache(soa.Self(), mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()));
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
- return mUnit->GetClassLinker()->ResolveField(*mUnit->GetDexFile(), field_idx, dex_cache,
- class_loader, is_static);
-}
-
static mirror::ArtMethod* ComputeMethodReferencedFromCompilingMethod(ScopedObjectAccess& soa,
const DexCompilationUnit* mUnit,
uint32_t method_idx,
@@ -947,117 +977,80 @@ bool CompilerDriver::ComputeSpecialAccessorInfo(uint32_t field_idx, bool is_put,
}
bool CompilerDriver::ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit,
- bool is_put, int* field_offset, bool* is_volatile) {
+ bool is_put, MemberOffset* field_offset,
+ bool* is_volatile) {
ScopedObjectAccess soa(Thread::Current());
- // Conservative defaults.
- *field_offset = -1;
- *is_volatile = true;
- // Try to resolve field and ignore if an Incompatible Class Change Error (ie is static).
- mirror::ArtField* resolved_field =
- ComputeFieldReferencedFromCompilingMethod(soa, mUnit, field_idx, false);
- if (resolved_field != NULL && !resolved_field->IsStatic()) {
- SirtRef<mirror::DexCache> dex_cache(soa.Self(),
- resolved_field->GetDeclaringClass()->GetDexCache());
- mirror::Class* referrer_class =
- ComputeCompilingMethodsClass(soa, dex_cache, mUnit);
- if (referrer_class != NULL) {
- mirror::Class* fields_class = resolved_field->GetDeclaringClass();
- bool access_ok = referrer_class->CanAccessResolvedField(fields_class, resolved_field,
- dex_cache.get(), field_idx);
- bool is_write_to_final_from_wrong_class = is_put && resolved_field->IsFinal() &&
- fields_class != referrer_class;
- if (access_ok && !is_write_to_final_from_wrong_class) {
- *field_offset = resolved_field->GetOffset().Int32Value();
- *is_volatile = resolved_field->IsVolatile();
- stats_->ResolvedInstanceField();
- return true; // Fast path.
- }
- }
- }
- // Clean up any exception left by field/type resolution
- if (soa.Self()->IsExceptionPending()) {
- soa.Self()->ClearException();
+ // Try to resolve the field and compiling method's class.
+ mirror::ArtField* resolved_field;
+ mirror::Class* referrer_class;
+ mirror::DexCache* dex_cache;
+ {
+ SirtRef<mirror::DexCache> dex_cache_sirt(soa.Self(),
+ mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()));
+ SirtRef<mirror::ClassLoader> class_loader_sirt(soa.Self(),
+ soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
+ SirtRef<mirror::ArtField> resolved_field_sirt(soa.Self(),
+ ResolveField(soa, dex_cache_sirt, class_loader_sirt, mUnit, field_idx, false));
+ referrer_class = (resolved_field_sirt.get() != nullptr)
+ ? ResolveCompilingMethodsClass(soa, dex_cache_sirt, class_loader_sirt, mUnit) : nullptr;
+ resolved_field = resolved_field_sirt.get();
+ dex_cache = dex_cache_sirt.get();
}
- stats_->UnresolvedInstanceField();
- return false; // Incomplete knowledge needs slow path.
+ bool result = false;
+ if (resolved_field != nullptr && referrer_class != nullptr) {
+ *is_volatile = IsFieldVolatile(resolved_field);
+ std::pair<bool, bool> fast_path = IsFastInstanceField(
+ dex_cache, referrer_class, resolved_field, field_idx, field_offset);
+ result = is_put ? fast_path.second : fast_path.first;
+ }
+ if (!result) {
+ // Conservative defaults.
+ *is_volatile = true;
+ *field_offset = MemberOffset(static_cast<size_t>(-1));
+ }
+ ProcessedInstanceField(result);
+ return result;
}
bool CompilerDriver::ComputeStaticFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit,
- bool is_put, int* field_offset, int* storage_index,
- bool* is_referrers_class, bool* is_volatile,
- bool* is_initialized) {
+ bool is_put, MemberOffset* field_offset,
+ uint32_t* storage_index, bool* is_referrers_class,
+ bool* is_volatile, bool* is_initialized) {
ScopedObjectAccess soa(Thread::Current());
- // Conservative defaults.
- *field_offset = -1;
- *storage_index = -1;
- *is_referrers_class = false;
- *is_volatile = true;
- *is_initialized = false;
- // Try to resolve field and ignore if an Incompatible Class Change Error (ie isn't static).
- mirror::ArtField* resolved_field =
- ComputeFieldReferencedFromCompilingMethod(soa, mUnit, field_idx, true);
- if (resolved_field != NULL && resolved_field->IsStatic()) {
- SirtRef<mirror::DexCache> dex_cache(soa.Self(), resolved_field->GetDeclaringClass()->GetDexCache());
- mirror::Class* referrer_class =
- ComputeCompilingMethodsClass(soa, dex_cache, mUnit);
- if (referrer_class != NULL) {
- mirror::Class* fields_class = resolved_field->GetDeclaringClass();
- if (fields_class == referrer_class) {
- *is_referrers_class = true; // implies no worrying about class initialization
- *is_initialized = true;
- *field_offset = resolved_field->GetOffset().Int32Value();
- *is_volatile = resolved_field->IsVolatile();
- stats_->ResolvedLocalStaticField();
- return true; // fast path
- } else {
- bool access_ok = referrer_class->CanAccessResolvedField(fields_class, resolved_field,
- dex_cache.get(), field_idx);
- bool is_write_to_final_from_wrong_class = is_put && resolved_field->IsFinal();
- if (access_ok && !is_write_to_final_from_wrong_class) {
- // We have the resolved field, we must make it into a index for the referrer
- // in its static storage (which may fail if it doesn't have a slot for it)
- // TODO: for images we can elide the static storage base null check
- // if we know there's a non-null entry in the image
- mirror::DexCache* dex_cache = mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile());
- if (fields_class->GetDexCache() == dex_cache) {
- // common case where the dex cache of both the referrer and the field are the same,
- // no need to search the dex file
- *storage_index = fields_class->GetDexTypeIndex();
- *field_offset = resolved_field->GetOffset().Int32Value();
- *is_volatile = resolved_field->IsVolatile();
- *is_initialized = fields_class->IsInitialized() &&
- CanAssumeTypeIsPresentInDexCache(*mUnit->GetDexFile(), *storage_index);
- stats_->ResolvedStaticField();
- return true;
- }
- // Search dex file for localized ssb index, may fail if field's class is a parent
- // of the class mentioned in the dex file and there is no dex cache entry.
- const DexFile::StringId* string_id =
- mUnit->GetDexFile()->FindStringId(FieldHelper(resolved_field).GetDeclaringClassDescriptor());
- if (string_id != NULL) {
- const DexFile::TypeId* type_id =
- mUnit->GetDexFile()->FindTypeId(mUnit->GetDexFile()->GetIndexForStringId(*string_id));
- if (type_id != NULL) {
- // medium path, needs check of static storage base being initialized
- *storage_index = mUnit->GetDexFile()->GetIndexForTypeId(*type_id);
- *field_offset = resolved_field->GetOffset().Int32Value();
- *is_volatile = resolved_field->IsVolatile();
- *is_initialized = fields_class->IsInitialized() &&
- CanAssumeTypeIsPresentInDexCache(*mUnit->GetDexFile(), *storage_index);
- stats_->ResolvedStaticField();
- return true;
- }
- }
- }
- }
- }
- }
- // Clean up any exception left by field/type resolution
- if (soa.Self()->IsExceptionPending()) {
- soa.Self()->ClearException();
+ // Try to resolve the field and compiling method's class.
+ mirror::ArtField* resolved_field;
+ mirror::Class* referrer_class;
+ mirror::DexCache* dex_cache;
+ {
+ SirtRef<mirror::DexCache> dex_cache_sirt(soa.Self(),
+ mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()));
+ SirtRef<mirror::ClassLoader> class_loader_sirt(soa.Self(),
+ soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
+ SirtRef<mirror::ArtField> resolved_field_sirt(soa.Self(),
+ ResolveField(soa, dex_cache_sirt, class_loader_sirt, mUnit, field_idx, true));
+ referrer_class = (resolved_field_sirt.get() != nullptr)
+ ? ResolveCompilingMethodsClass(soa, dex_cache_sirt, class_loader_sirt, mUnit) : nullptr;
+ resolved_field = resolved_field_sirt.get();
+ dex_cache = dex_cache_sirt.get();
}
- stats_->UnresolvedStaticField();
- return false; // Incomplete knowledge needs slow path.
+ bool result = false;
+ if (resolved_field != nullptr && referrer_class != nullptr) {
+ *is_volatile = IsFieldVolatile(resolved_field);
+ std::pair<bool, bool> fast_path = IsFastStaticField(
+ dex_cache, referrer_class, resolved_field, field_idx, field_offset,
+ storage_index, is_referrers_class, is_initialized);
+ result = is_put ? fast_path.second : fast_path.first;
+ }
+ if (!result) {
+ // Conservative defaults.
+ *is_volatile = true;
+ *field_offset = MemberOffset(static_cast<size_t>(-1));
+ *storage_index = DexFile::kDexNoIndex;
+ *is_referrers_class = false;
+ *is_initialized = false;
+ }
+ ProcessedStaticField(result, *is_referrers_class);
+ return result;
}
void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType sharp_type,
@@ -1368,13 +1361,13 @@ class ParallelCompilationManager {
jobject class_loader,
CompilerDriver* compiler,
const DexFile* dex_file,
- ThreadPool& thread_pool)
+ ThreadPool* thread_pool)
: index_(0),
class_linker_(class_linker),
class_loader_(class_loader),
compiler_(compiler),
dex_file_(dex_file),
- thread_pool_(&thread_pool) {}
+ thread_pool_(thread_pool) {}
ClassLinker* GetClassLinker() const {
CHECK(class_linker_ != NULL);
@@ -1628,7 +1621,7 @@ static void ResolveType(const ParallelCompilationManager* manager, size_t type_i
}
void CompilerDriver::ResolveDexFile(jobject class_loader, const DexFile& dex_file,
- ThreadPool& thread_pool, TimingLogger& timings) {
+ ThreadPool* thread_pool, TimingLogger* timings) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
// TODO: we could resolve strings here, although the string table is largely filled with class
@@ -1638,16 +1631,16 @@ void CompilerDriver::ResolveDexFile(jobject class_loader, const DexFile& dex_fil
if (IsImage()) {
// For images we resolve all types, such as array, whereas for applications just those with
// classdefs are resolved by ResolveClassFieldsAndMethods.
- timings.NewSplit("Resolve Types");
+ timings->NewSplit("Resolve Types");
context.ForAll(0, dex_file.NumTypeIds(), ResolveType, thread_count_);
}
- timings.NewSplit("Resolve MethodsAndFields");
+ timings->NewSplit("Resolve MethodsAndFields");
context.ForAll(0, dex_file.NumClassDefs(), ResolveClassFieldsAndMethods, thread_count_);
}
void CompilerDriver::Verify(jobject class_loader, const std::vector<const DexFile*>& dex_files,
- ThreadPool& thread_pool, TimingLogger& timings) {
+ ThreadPool* thread_pool, TimingLogger* timings) {
for (size_t i = 0; i != dex_files.size(); ++i) {
const DexFile* dex_file = dex_files[i];
CHECK(dex_file != NULL);
@@ -1702,8 +1695,8 @@ static void VerifyClass(const ParallelCompilationManager* manager, size_t class_
}
void CompilerDriver::VerifyDexFile(jobject class_loader, const DexFile& dex_file,
- ThreadPool& thread_pool, TimingLogger& timings) {
- timings.NewSplit("Verify Dex File");
+ ThreadPool* thread_pool, TimingLogger* timings) {
+ timings->NewSplit("Verify Dex File");
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
ParallelCompilationManager context(class_linker, class_loader, this, &dex_file, thread_pool);
context.ForAll(0, dex_file.NumClassDefs(), VerifyClass, thread_count_);
@@ -1805,8 +1798,8 @@ static void InitializeClass(const ParallelCompilationManager* manager, size_t cl
}
void CompilerDriver::InitializeClasses(jobject jni_class_loader, const DexFile& dex_file,
- ThreadPool& thread_pool, TimingLogger& timings) {
- timings.NewSplit("InitializeNoClinit");
+ ThreadPool* thread_pool, TimingLogger* timings) {
+ timings->NewSplit("InitializeNoClinit");
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
ParallelCompilationManager context(class_linker, jni_class_loader, this, &dex_file, thread_pool);
size_t thread_count;
@@ -1825,7 +1818,7 @@ void CompilerDriver::InitializeClasses(jobject jni_class_loader, const DexFile&
void CompilerDriver::InitializeClasses(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
- ThreadPool& thread_pool, TimingLogger& timings) {
+ ThreadPool* thread_pool, TimingLogger* timings) {
for (size_t i = 0; i != dex_files.size(); ++i) {
const DexFile* dex_file = dex_files[i];
CHECK(dex_file != NULL);
@@ -1834,7 +1827,7 @@ void CompilerDriver::InitializeClasses(jobject class_loader,
}
void CompilerDriver::Compile(jobject class_loader, const std::vector<const DexFile*>& dex_files,
- ThreadPool& thread_pool, TimingLogger& timings) {
+ ThreadPool* thread_pool, TimingLogger* timings) {
for (size_t i = 0; i != dex_files.size(); ++i) {
const DexFile* dex_file = dex_files[i];
CHECK(dex_file != NULL);
@@ -1916,8 +1909,8 @@ void CompilerDriver::CompileClass(const ParallelCompilationManager* manager, siz
}
void CompilerDriver::CompileDexFile(jobject class_loader, const DexFile& dex_file,
- ThreadPool& thread_pool, TimingLogger& timings) {
- timings.NewSplit("Compile Dex File");
+ ThreadPool* thread_pool, TimingLogger* timings) {
+ timings->NewSplit("Compile Dex File");
ParallelCompilationManager context(Runtime::Current()->GetClassLinker(), class_loader, this,
&dex_file, thread_pool);
context.ForAll(0, dex_file.NumClassDefs(), CompilerDriver::CompileClass, thread_count_);
@@ -1932,8 +1925,12 @@ void CompilerDriver::CompileMethod(const DexFile::CodeItem* code_item, uint32_t
uint64_t start_ns = NanoTime();
if ((access_flags & kAccNative) != 0) {
+#if defined(__x86_64__)
+ // leaving this empty will trigger the generic JNI version
+#else
compiled_method = compiler_backend_->JniCompile(*this, access_flags, method_idx, dex_file);
CHECK(compiled_method != NULL);
+#endif
} else if ((access_flags & kAccAbstract) != 0) {
} else {
MethodReference method_ref(&dex_file, method_idx);
@@ -2037,38 +2034,38 @@ bool CompilerDriver::RequiresConstructorBarrier(Thread* self, const DexFile* dex
bool CompilerDriver::WriteElf(const std::string& android_root,
bool is_host,
const std::vector<const art::DexFile*>& dex_files,
- OatWriter& oat_writer,
+ OatWriter* oat_writer,
art::File* file)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return compiler_backend_->WriteElf(file, oat_writer, dex_files, android_root, is_host, *this);
}
void CompilerDriver::InstructionSetToLLVMTarget(InstructionSet instruction_set,
- std::string& target_triple,
- std::string& target_cpu,
- std::string& target_attr) {
+ std::string* target_triple,
+ std::string* target_cpu,
+ std::string* target_attr) {
switch (instruction_set) {
case kThumb2:
- target_triple = "thumb-none-linux-gnueabi";
- target_cpu = "cortex-a9";
- target_attr = "+thumb2,+neon,+neonfp,+vfp3,+db";
+ *target_triple = "thumb-none-linux-gnueabi";
+ *target_cpu = "cortex-a9";
+ *target_attr = "+thumb2,+neon,+neonfp,+vfp3,+db";
break;
case kArm:
- target_triple = "armv7-none-linux-gnueabi";
+ *target_triple = "armv7-none-linux-gnueabi";
// TODO: Fix for Nexus S.
- target_cpu = "cortex-a9";
+ *target_cpu = "cortex-a9";
// TODO: Fix for Xoom.
- target_attr = "+v7,+neon,+neonfp,+vfp3,+db";
+ *target_attr = "+v7,+neon,+neonfp,+vfp3,+db";
break;
case kX86:
- target_triple = "i386-pc-linux-gnu";
- target_attr = "";
+ *target_triple = "i386-pc-linux-gnu";
+ *target_attr = "";
break;
case kMips:
- target_triple = "mipsel-unknown-linux";
- target_attr = "mips32r2";
+ *target_triple = "mipsel-unknown-linux";
+ *target_attr = "mips32r2";
break;
default:
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 377eb6fa34..80a6796a4e 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -48,9 +48,10 @@ class AOTCompilationStats;
class CompilerOptions;
class DexCompilationUnit;
class DexFileToMethodInlinerMap;
-class InlineIGetIPutData;
+struct InlineIGetIPutData;
class OatWriter;
class ParallelCompilationManager;
+class ScopedObjectAccess;
class TimingLogger;
class VerificationResults;
class VerifiedMethod;
@@ -108,11 +109,11 @@ class CompilerDriver {
~CompilerDriver();
void CompileAll(jobject class_loader, const std::vector<const DexFile*>& dex_files,
- TimingLogger& timings)
+ TimingLogger* timings)
LOCKS_EXCLUDED(Locks::mutator_lock_);
// Compile a single Method.
- void CompileOne(mirror::ArtMethod* method, TimingLogger& timings)
+ void CompileOne(mirror::ArtMethod* method, TimingLogger* timings)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
VerificationResults* GetVerificationResults() const {
@@ -123,16 +124,15 @@ class CompilerDriver {
return method_inliner_map_;
}
- const InstructionSet& GetInstructionSet() const {
+ InstructionSet GetInstructionSet() const {
return instruction_set_;
}
- const InstructionSetFeatures& GetInstructionSetFeatures() const {
+ InstructionSetFeatures GetInstructionSetFeatures() const {
return instruction_set_features_;
}
const CompilerOptions& GetCompilerOptions() const {
- DCHECK(compiler_options_ != nullptr);
return *compiler_options_;
}
@@ -164,6 +164,8 @@ class CompilerDriver {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const std::vector<uint8_t>* CreatePortableToInterpreterBridge() const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const std::vector<uint8_t>* CreateQuickGenericJniTrampoline() const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const std::vector<uint8_t>* CreateQuickImtConflictTrampoline() const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const std::vector<uint8_t>* CreateQuickResolutionTrampoline() const
@@ -204,6 +206,53 @@ class CompilerDriver {
bool* is_type_initialized, bool* use_direct_type_ptr,
uintptr_t* direct_type_ptr);
+ // Get the DexCache for the given compilation unit.
+ mirror::DexCache* GetDexCache(const DexCompilationUnit* mUnit)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ mirror::ClassLoader* GetClassLoader(ScopedObjectAccess& soa, const DexCompilationUnit* mUnit)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Resolve compiling method's class. Returns nullptr on failure.
+ mirror::Class* ResolveCompilingMethodsClass(
+ ScopedObjectAccess& soa, const SirtRef<mirror::DexCache>& dex_cache,
+ const SirtRef<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Resolve a field. Returns nullptr on failure, including incompatible class change.
+ // NOTE: Unlike ClassLinker's ResolveField(), this method enforces is_static.
+ mirror::ArtField* ResolveField(
+ ScopedObjectAccess& soa, const SirtRef<mirror::DexCache>& dex_cache,
+ const SirtRef<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
+ uint32_t field_idx, bool is_static)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Get declaration location of a resolved field.
+ void GetResolvedFieldDexFileLocation(
+ mirror::ArtField* resolved_field, const DexFile** declaring_dex_file,
+ uint16_t* declaring_class_idx, uint16_t* declaring_field_idx)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ bool IsFieldVolatile(mirror::ArtField* field) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Can we fast-path an IGET/IPUT access to an instance field? If yes, compute the field offset.
+ std::pair<bool, bool> IsFastInstanceField(
+ mirror::DexCache* dex_cache, mirror::Class* referrer_class,
+ mirror::ArtField* resolved_field, uint16_t field_idx, MemberOffset* field_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Can we fast-path an SGET/SPUT access to a static field? If yes, compute the field offset,
+ // the type index of the declaring class in the referrer's dex file and whether the declaring
+ // class is the referrer's class or at least can be assumed to be initialized.
+ std::pair<bool, bool> IsFastStaticField(
+ mirror::DexCache* dex_cache, mirror::Class* referrer_class,
+ mirror::ArtField* resolved_field, uint16_t field_idx, MemberOffset* field_offset,
+ uint32_t* storage_index, bool* is_referrers_class, bool* is_initialized)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void ProcessedInstanceField(bool resolved);
+ void ProcessedStaticField(bool resolved, bool local);
+
// Can we fast path instance field access in a verified accessor?
// If yes, computes field's offset and volatility and whether the method is static or not.
static bool ComputeSpecialAccessorInfo(uint32_t field_idx, bool is_put,
@@ -213,13 +262,13 @@ class CompilerDriver {
// Can we fast path instance field access? Computes field's offset and volatility.
bool ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit, bool is_put,
- int* field_offset, bool* is_volatile)
+ MemberOffset* field_offset, bool* is_volatile)
LOCKS_EXCLUDED(Locks::mutator_lock_);
// Can we fastpath static field access? Computes field's offset, volatility and whether the
// field is within the referrer (which can avoid checking class initialization).
bool ComputeStaticFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit, bool is_put,
- int* field_offset, int* storage_index,
+ MemberOffset* field_offset, uint32_t* storage_index,
bool* is_referrers_class, bool* is_volatile, bool* is_initialized)
LOCKS_EXCLUDED(Locks::mutator_lock_);
@@ -275,21 +324,21 @@ class CompilerDriver {
support_boot_image_fixup_ = support_boot_image_fixup;
}
- ArenaPool& GetArenaPool() {
- return arena_pool_;
+ ArenaPool* GetArenaPool() {
+ return &arena_pool_;
}
bool WriteElf(const std::string& android_root,
bool is_host,
const std::vector<const DexFile*>& dex_files,
- OatWriter& oat_writer,
+ OatWriter* oat_writer,
File* file);
- // TODO: move to a common home for llvm helpers once quick/portable are merged
+ // TODO: move to a common home for llvm helpers once quick/portable are merged.
static void InstructionSetToLLVMTarget(InstructionSet instruction_set,
- std::string& target_triple,
- std::string& target_cpu,
- std::string& target_attr);
+ std::string* target_triple,
+ std::string* target_cpu,
+ std::string* target_attr);
void SetCompilerContext(void* compiler_context) {
compiler_context_ = compiler_context;
@@ -310,8 +359,8 @@ class CompilerDriver {
return dump_passes_;
}
- CumulativeLogger& GetTimingsLogger() const {
- return *timings_logger_;
+ CumulativeLogger* GetTimingsLogger() const {
+ return timings_logger_;
}
class PatchInformation {
@@ -494,6 +543,15 @@ class CompilerDriver {
std::vector<uint8_t>* DeduplicateMappingTable(const std::vector<uint8_t>& code);
std::vector<uint8_t>* DeduplicateVMapTable(const std::vector<uint8_t>& code);
std::vector<uint8_t>* DeduplicateGCMap(const std::vector<uint8_t>& code);
+ std::vector<uint8_t>* DeduplicateCFIInfo(const std::vector<uint8_t>* cfi_info);
+
+ /*
+ * @brief Return the pointer to the Call Frame Information.
+ * @return Pointer to the call frame information for this compilation.
+ */
+ std::vector<uint8_t>* GetCallFrameInformation() const {
+ return cfi_info_.get();
+ }
private:
// Compute constant code and method pointers when possible
@@ -507,43 +565,42 @@ class CompilerDriver {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void PreCompile(jobject class_loader, const std::vector<const DexFile*>& dex_files,
- ThreadPool& thread_pool, TimingLogger& timings)
+ ThreadPool* thread_pool, TimingLogger* timings)
LOCKS_EXCLUDED(Locks::mutator_lock_);
- void LoadImageClasses(TimingLogger& timings);
+ void LoadImageClasses(TimingLogger* timings);
// Attempt to resolve all type, methods, fields, and strings
// referenced from code in the dex file following PathClassLoader
// ordering semantics.
void Resolve(jobject class_loader, const std::vector<const DexFile*>& dex_files,
- ThreadPool& thread_pool, TimingLogger& timings)
+ ThreadPool* thread_pool, TimingLogger* timings)
LOCKS_EXCLUDED(Locks::mutator_lock_);
void ResolveDexFile(jobject class_loader, const DexFile& dex_file,
- ThreadPool& thread_pool, TimingLogger& timings)
+ ThreadPool* thread_pool, TimingLogger* timings)
LOCKS_EXCLUDED(Locks::mutator_lock_);
void Verify(jobject class_loader, const std::vector<const DexFile*>& dex_files,
- ThreadPool& thread_pool, TimingLogger& timings);
+ ThreadPool* thread_pool, TimingLogger* timings);
void VerifyDexFile(jobject class_loader, const DexFile& dex_file,
- ThreadPool& thread_pool, TimingLogger& timings)
+ ThreadPool* thread_pool, TimingLogger* timings)
LOCKS_EXCLUDED(Locks::mutator_lock_);
void InitializeClasses(jobject class_loader, const std::vector<const DexFile*>& dex_files,
- ThreadPool& thread_pool, TimingLogger& timings)
+ ThreadPool* thread_pool, TimingLogger* timings)
LOCKS_EXCLUDED(Locks::mutator_lock_);
void InitializeClasses(jobject class_loader, const DexFile& dex_file,
- ThreadPool& thread_pool, TimingLogger& timings)
+ ThreadPool* thread_pool, TimingLogger* timings)
LOCKS_EXCLUDED(Locks::mutator_lock_, compiled_classes_lock_);
- void UpdateImageClasses(TimingLogger& timings)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ void UpdateImageClasses(TimingLogger* timings) LOCKS_EXCLUDED(Locks::mutator_lock_);
static void FindClinitImageClassesCallback(mirror::Object* object, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void Compile(jobject class_loader, const std::vector<const DexFile*>& dex_files,
- ThreadPool& thread_pool, TimingLogger& timings);
+ ThreadPool* thread_pool, TimingLogger* timings);
void CompileDexFile(jobject class_loader, const DexFile& dex_file,
- ThreadPool& thread_pool, TimingLogger& timings)
+ ThreadPool* thread_pool, TimingLogger* timings)
LOCKS_EXCLUDED(Locks::mutator_lock_);
void CompileMethod(const DexFile::CodeItem* code_item, uint32_t access_flags,
InvokeType invoke_type, uint16_t class_def_idx, uint32_t method_idx,
@@ -627,6 +684,9 @@ class CompilerDriver {
bool support_boot_image_fixup_;
+ // Call Frame Information, which may be generated to support stack tracebacks.
+ UniquePtr<std::vector<uint8_t> > cfi_info_;
+
// DeDuplication data structures, these own the corresponding byte arrays.
class DedupeHashFunc {
public:
@@ -665,6 +725,7 @@ class CompilerDriver {
DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc, 4> dedupe_mapping_table_;
DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc, 4> dedupe_vmap_table_;
DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc, 4> dedupe_gc_map_;
+ DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc, 4> dedupe_cfi_info_;
DISALLOW_COPY_AND_ASSIGN(CompilerDriver);
};
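
The new DeduplicateCFIInfo presumably funnels through the dedupe_cfi_info_ set added below, like the mapping-table, vmap-table, and GC-map helpers. A minimal sketch of the underlying idea, using only standard-library containers; ByteDedupe is hypothetical and not ART's DedupeSet (which hashes and shards):

    #include <cstdint>
    #include <set>
    #include <vector>

    // Identical byte vectors are stored once; callers share a pointer to
    // the canonical copy.
    class ByteDedupe {
     public:
      const std::vector<uint8_t>* Add(const std::vector<uint8_t>& data) {
        // std::set nodes are stable, so the returned pointer stays valid.
        return &*dedupe_.insert(data).first;
      }

     private:
      std::set<std::vector<uint8_t>> dedupe_;
    };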
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index 34806ce293..2b3af6281f 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -40,7 +40,7 @@ class CompilerDriverTest : public CommonCompilerTest {
timings.StartSplit("CompileAll");
compiler_driver_->CompileAll(class_loader,
Runtime::Current()->GetCompileTimeClassPath(class_loader),
- timings);
+ &timings);
MakeAllExecutable(class_loader);
timings.EndSplit();
}
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 9f6745b015..39738ab049 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -43,7 +43,8 @@ class CompilerOptions {
large_method_threshold_(kDefaultLargeMethodThreshold),
small_method_threshold_(kDefaultSmallMethodThreshold),
tiny_method_threshold_(kDefaultTinyMethodThreshold),
- num_dex_methods_threshold_(kDefaultNumDexMethodsThreshold)
+ num_dex_methods_threshold_(kDefaultNumDexMethodsThreshold),
+ generate_gdb_information_(false)
#ifdef ART_SEA_IR_MODE
, sea_ir_mode_(false)
#endif
@@ -54,7 +55,8 @@ class CompilerOptions {
size_t large_method_threshold,
size_t small_method_threshold,
size_t tiny_method_threshold,
- size_t num_dex_methods_threshold
+ size_t num_dex_methods_threshold,
+ bool generate_gdb_information
#ifdef ART_SEA_IR_MODE
, bool sea_ir_mode
#endif
@@ -64,7 +66,8 @@ class CompilerOptions {
large_method_threshold_(large_method_threshold),
small_method_threshold_(small_method_threshold),
tiny_method_threshold_(tiny_method_threshold),
- num_dex_methods_threshold_(num_dex_methods_threshold)
+ num_dex_methods_threshold_(num_dex_methods_threshold),
+ generate_gdb_information_(generate_gdb_information)
#ifdef ART_SEA_IR_MODE
, sea_ir_mode_(sea_ir_mode)
#endif
@@ -118,6 +121,10 @@ class CompilerOptions {
bool GetSeaIrMode();
#endif
+ bool GetGenerateGDBInformation() const {
+ return generate_gdb_information_;
+ }
+
private:
CompilerFilter compiler_filter_;
size_t huge_method_threshold_;
@@ -125,6 +132,7 @@ class CompilerOptions {
size_t small_method_threshold_;
size_t tiny_method_threshold_;
size_t num_dex_methods_threshold_;
+ bool generate_gdb_information_;
#ifdef ART_SEA_IR_MODE
bool sea_ir_mode_;
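
The new generate_gdb_information_ flag threads debug-section generation through CompilerOptions. A hedged usage sketch; the threshold values and the kSpeed enumerator are illustrative assumptions, not taken from this change, and the constructor gains an extra sea_ir_mode argument when ART_SEA_IR_MODE is defined:

    // Hypothetical call site; argument values are invented for illustration.
    CompilerOptions options(CompilerOptions::kSpeed,
                            10000,  // huge_method_threshold
                            600,    // large_method_threshold
                            60,     // small_method_threshold
                            20,     // tiny_method_threshold
                            900,    // num_dex_methods_threshold
                            true);  // generate_gdb_information
    if (options.GetGenerateGDBInformation()) {
      // ElfWriterQuick will emit .debug_info, .debug_abbrev, .debug_frame,
      // and .debug_str sections (see elf_writer_quick.cc below).
    }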
diff --git a/compiler/elf_writer.cc b/compiler/elf_writer.cc
index 6db3fa1a11..ccc26a1baf 100644
--- a/compiler/elf_writer.cc
+++ b/compiler/elf_writer.cc
@@ -30,12 +30,7 @@
namespace art {
-ElfWriter::ElfWriter(const CompilerDriver& driver, File* elf_file)
- : compiler_driver_(&driver), elf_file_(elf_file) {}
-
-ElfWriter::~ElfWriter() {}
-
-Elf32_Addr ElfWriter::GetOatDataAddress(ElfFile* elf_file) {
+uint32_t ElfWriter::GetOatDataAddress(ElfFile* elf_file) {
Elf32_Addr oatdata_address = elf_file->FindSymbolAddress(SHT_DYNSYM,
"oatdata",
false);
diff --git a/compiler/elf_writer.h b/compiler/elf_writer.h
index 99dfc56d49..3610d1a8b2 100644
--- a/compiler/elf_writer.h
+++ b/compiler/elf_writer.h
@@ -23,7 +23,6 @@
#include <vector>
#include "base/macros.h"
-#include "elf_utils.h"
#include "os.h"
namespace art {
@@ -42,21 +41,23 @@ class ElfWriter {
size_t& oat_data_offset);
// Returns the runtime oat_data address for an opened ElfFile.
- static Elf32_Addr GetOatDataAddress(ElfFile* elf_file);
+ static uint32_t GetOatDataAddress(ElfFile* elf_file);
protected:
- ElfWriter(const CompilerDriver& driver, File* elf_file);
- virtual ~ElfWriter();
+ ElfWriter(const CompilerDriver& driver, File* elf_file)
+ : compiler_driver_(&driver), elf_file_(elf_file) {
+ }
- virtual bool Write(OatWriter& oat_writer,
+ virtual ~ElfWriter() {}
+
+ virtual bool Write(OatWriter* oat_writer,
const std::vector<const DexFile*>& dex_files,
const std::string& android_root,
bool is_host)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
- // Setup by constructor
- const CompilerDriver* compiler_driver_;
- File* elf_file_;
+ const CompilerDriver* const compiler_driver_;
+ File* const elf_file_;
};
} // namespace art
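
Most hunks in this change convert mutable reference parameters to pointers (ThreadPool*, TimingLogger*, OatWriter*, OutputStream*), following the style rule that a visible '&' should mark every argument the callee may mutate. A self-contained illustration of the convention; the names here are invented:

    #include <cstddef>
    #include <string>

    static void AppendSuffix(std::string* s) {  // mutated: pass by pointer
      s->append(".oat");
    }

    static std::size_t Length(const std::string& s) {  // read-only: const ref
      return s.size();
    }

    int main() {
      std::string name = "boot";
      AppendSuffix(&name);  // The '&' makes the mutation visible at the call site.
      return Length(name) == 8 ? 0 : 1;
    }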
diff --git a/compiler/elf_writer_mclinker.cc b/compiler/elf_writer_mclinker.cc
index c7baf4f3e5..b2d3a69e74 100644
--- a/compiler/elf_writer_mclinker.cc
+++ b/compiler/elf_writer_mclinker.cc
@@ -16,6 +16,7 @@
#include "elf_writer_mclinker.h"
+#include <llvm/Support/ELF.h>
#include <llvm/Support/TargetSelect.h>
#include <mcld/Environment.h>
@@ -32,7 +33,6 @@
#include "class_linker.h"
#include "dex_method_iterator.h"
#include "driver/compiler_driver.h"
-#include "elf_file.h"
#include "globals.h"
#include "mirror/art_method.h"
#include "mirror/art_method-inl.h"
@@ -44,12 +44,14 @@
namespace art {
ElfWriterMclinker::ElfWriterMclinker(const CompilerDriver& driver, File* elf_file)
- : ElfWriter(driver, elf_file), oat_input_(NULL) {}
+ : ElfWriter(driver, elf_file), oat_input_(nullptr) {
+}
-ElfWriterMclinker::~ElfWriterMclinker() {}
+ElfWriterMclinker::~ElfWriterMclinker() {
+}
bool ElfWriterMclinker::Create(File* elf_file,
- OatWriter& oat_writer,
+ OatWriter* oat_writer,
const std::vector<const DexFile*>& dex_files,
const std::string& android_root,
bool is_host,
@@ -58,29 +60,29 @@ bool ElfWriterMclinker::Create(File* elf_file,
return elf_writer.Write(oat_writer, dex_files, android_root, is_host);
}
-bool ElfWriterMclinker::Write(OatWriter& oat_writer,
+bool ElfWriterMclinker::Write(OatWriter* oat_writer,
const std::vector<const DexFile*>& dex_files,
const std::string& android_root,
bool is_host) {
std::vector<uint8_t> oat_contents;
- oat_contents.reserve(oat_writer.GetSize());
+ oat_contents.reserve(oat_writer->GetSize());
VectorOutputStream output_stream("oat contents", oat_contents);
- CHECK(oat_writer.Write(output_stream));
- CHECK_EQ(oat_writer.GetSize(), oat_contents.size());
+ CHECK(oat_writer->Write(&output_stream));
+ CHECK_EQ(oat_writer->GetSize(), oat_contents.size());
Init();
AddOatInput(oat_contents);
-#if defined(ART_USE_PORTABLE_COMPILER)
- AddMethodInputs(dex_files);
- AddRuntimeInputs(android_root, is_host);
-#endif
+ if (kUsePortableCompiler) {
+ AddMethodInputs(dex_files);
+ AddRuntimeInputs(android_root, is_host);
+ }
if (!Link()) {
return false;
}
oat_contents.clear();
-#if defined(ART_USE_PORTABLE_COMPILER)
- FixupOatMethodOffsets(dex_files);
-#endif
+ if (kUsePortableCompiler) {
+ FixupOatMethodOffsets(dex_files);
+ }
return true;
}
@@ -100,9 +102,9 @@ void ElfWriterMclinker::Init() {
std::string target_cpu;
std::string target_attr;
CompilerDriver::InstructionSetToLLVMTarget(compiler_driver_->GetInstructionSet(),
- target_triple,
- target_cpu,
- target_attr);
+ &target_triple,
+ &target_cpu,
+ &target_attr);
// Based on mclinker's llvm-mcld.cpp main() and LinkerTest
//
@@ -236,7 +238,6 @@ void ElfWriterMclinker::AddOatInput(std::vector<uint8_t>& oat_contents) {
text_section);
}
-#if defined(ART_USE_PORTABLE_COMPILER)
void ElfWriterMclinker::AddMethodInputs(const std::vector<const DexFile*>& dex_files) {
DCHECK(oat_input_ != NULL);
@@ -320,7 +321,6 @@ void ElfWriterMclinker::AddRuntimeInputs(const std::string& android_root, bool i
mcld::Input* libm_lib_input_input = ir_builder_->ReadInput(libm_lib, libm_lib);
CHECK(libm_lib_input_input != NULL);
}
-#endif
bool ElfWriterMclinker::Link() {
// link inputs
@@ -345,7 +345,6 @@ bool ElfWriterMclinker::Link() {
return true;
}
-#if defined(ART_USE_PORTABLE_COMPILER)
void ElfWriterMclinker::FixupOatMethodOffsets(const std::vector<const DexFile*>& dex_files) {
std::string error_msg;
UniquePtr<ElfFile> elf_file(ElfFile::Open(elf_file_, true, false, &error_msg));
@@ -409,6 +408,5 @@ uint32_t ElfWriterMclinker::FixupCompiledCodeOffset(ElfFile& elf_file,
}
return compiled_code_offset;
}
-#endif
} // namespace art
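
The #if defined(ART_USE_PORTABLE_COMPILER) guards above become ordinary branches on a compile-time constant: both sides now get type-checked in every build, while the optimizer still drops the dead one. A sketch of the pattern; only the name kUsePortableCompiler comes from the hunks, the definition here is assumed:

    // Assumed definition; presumably provided by ART's build configuration.
    static constexpr bool kUsePortableCompiler =
    #if defined(ART_USE_PORTABLE_COMPILER)
        true;
    #else
        false;
    #endif

    void Example() {
      if (kUsePortableCompiler) {
        // Compiled and type-checked in every configuration,
        // but eliminated as dead code when the constant is false.
      }
    }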
diff --git a/compiler/elf_writer_mclinker.h b/compiler/elf_writer_mclinker.h
index 8ee7231f79..13757edecd 100644
--- a/compiler/elf_writer_mclinker.h
+++ b/compiler/elf_writer_mclinker.h
@@ -37,11 +37,11 @@ namespace art {
class CompiledCode;
-class ElfWriterMclinker : public ElfWriter {
+class ElfWriterMclinker FINAL : public ElfWriter {
public:
// Write an ELF file. Returns true on success, false on failure.
static bool Create(File* file,
- OatWriter& oat_writer,
+ OatWriter* oat_writer,
const std::vector<const DexFile*>& dex_files,
const std::string& android_root,
bool is_host,
@@ -49,10 +49,11 @@ class ElfWriterMclinker : public ElfWriter {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
protected:
- virtual bool Write(OatWriter& oat_writer,
- const std::vector<const DexFile*>& dex_files,
- const std::string& android_root,
- bool is_host)
+ bool Write(OatWriter* oat_writer,
+ const std::vector<const DexFile*>& dex_files,
+ const std::string& android_root,
+ bool is_host)
+ OVERRIDE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
@@ -65,13 +66,11 @@ class ElfWriterMclinker : public ElfWriter {
void AddCompiledCodeInput(const CompiledCode& compiled_code);
void AddRuntimeInputs(const std::string& android_root, bool is_host);
bool Link();
-#if defined(ART_USE_PORTABLE_COMPILER)
void FixupOatMethodOffsets(const std::vector<const DexFile*>& dex_files)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
uint32_t FixupCompiledCodeOffset(ElfFile& elf_file,
- ::llvm::ELF::Elf32_Addr oatdata_address,
+ uint32_t oatdata_address,
const CompiledCode& compiled_code);
-#endif
// Set up by Init().
UniquePtr<mcld::LinkerConfig> linker_config_;
diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc
index 3191374160..4b823ef5ec 100644
--- a/compiler/elf_writer_quick.cc
+++ b/compiler/elf_writer_quick.cc
@@ -29,13 +29,8 @@
namespace art {
-ElfWriterQuick::ElfWriterQuick(const CompilerDriver& driver, File* elf_file)
- : ElfWriter(driver, elf_file) {}
-
-ElfWriterQuick::~ElfWriterQuick() {}
-
bool ElfWriterQuick::Create(File* elf_file,
- OatWriter& oat_writer,
+ OatWriter* oat_writer,
const std::vector<const DexFile*>& dex_files,
const std::string& android_root,
bool is_host,
@@ -44,7 +39,7 @@ bool ElfWriterQuick::Create(File* elf_file,
return elf_writer.Write(oat_writer, dex_files, android_root, is_host);
}
-bool ElfWriterQuick::Write(OatWriter& oat_writer,
+bool ElfWriterQuick::Write(OatWriter* oat_writer,
const std::vector<const DexFile*>& dex_files_unused,
const std::string& android_root_unused,
bool is_host_unused) {
@@ -103,6 +98,7 @@ bool ElfWriterQuick::Write(OatWriter& oat_writer,
// | .rodata\0 |
// | .text\0 |
// | .shstrtab\0 |
+ // | .debug_frame\0 |
// +-------------------------+
// | Elf32_Shdr NULL |
// | Elf32_Shdr .dynsym |
@@ -112,6 +108,9 @@ bool ElfWriterQuick::Write(OatWriter& oat_writer,
// | Elf32_Shdr .rodata |
// | Elf32_Shdr .dynamic |
// | Elf32_Shdr .shstrtab |
+ // | Elf32_Shdr .debug_info | (Optional)
+ // | Elf32_Shdr .debug_abbrev| (Optional)
+ // | Elf32_Shdr .debug_frame | (Optional)
// +-------------------------+
// phase 1: computing offsets
@@ -197,7 +196,7 @@ bool ElfWriterQuick::Write(OatWriter& oat_writer,
// .rodata
uint32_t oat_data_alignment = kPageSize;
uint32_t oat_data_offset = expected_offset = RoundUp(expected_offset, oat_data_alignment);
- const OatHeader& oat_header = oat_writer.GetOatHeader();
+ const OatHeader& oat_header = oat_writer->GetOatHeader();
CHECK(oat_header.IsValid());
uint32_t oat_data_size = oat_header.GetExecutableOffset();
expected_offset += oat_data_size;
@@ -210,9 +209,9 @@ bool ElfWriterQuick::Write(OatWriter& oat_writer,
uint32_t oat_exec_alignment = kPageSize;
CHECK_ALIGNED(expected_offset, kPageSize);
uint32_t oat_exec_offset = expected_offset = RoundUp(expected_offset, oat_exec_alignment);
- uint32_t oat_exec_size = oat_writer.GetSize() - oat_data_size;
+ uint32_t oat_exec_size = oat_writer->GetSize() - oat_data_size;
expected_offset += oat_exec_size;
- CHECK_EQ(oat_data_offset + oat_writer.GetSize(), expected_offset);
+ CHECK_EQ(oat_data_offset + oat_writer->GetSize(), expected_offset);
if (debug) {
LOG(INFO) << "oat_exec_offset=" << oat_exec_offset << std::hex << " " << oat_exec_offset;
LOG(INFO) << "oat_exec_size=" << oat_exec_size << std::hex << " " << oat_exec_size;
@@ -264,6 +263,18 @@ bool ElfWriterQuick::Write(OatWriter& oat_writer,
uint32_t shstrtab_shstrtab_offset = shstrtab.size();
shstrtab += ".shstrtab";
shstrtab += '\0';
+ uint32_t shstrtab_debug_info_offset = shstrtab.size();
+ shstrtab += ".debug_info";
+ shstrtab += '\0';
+ uint32_t shstrtab_debug_abbrev_offset = shstrtab.size();
+ shstrtab += ".debug_abbrev";
+ shstrtab += '\0';
+ uint32_t shstrtab_debug_str_offset = shstrtab.size();
+ shstrtab += ".debug_str";
+ shstrtab += '\0';
+ uint32_t shstrtab_debug_frame_offset = shstrtab.size();
+ shstrtab += ".debug_frame";
+ shstrtab += '\0';
uint32_t shstrtab_size = shstrtab.size();
expected_offset += shstrtab_size;
if (debug) {
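
Each name appended to shstrtab above is recorded by its byte offset; that offset later lands in the matching section header's sh_name field, since ELF headers reference names indirectly through .shstrtab. A small sketch of the helper this code inlines; AddSectionName is hypothetical:

    #include <cstdint>
    #include <string>

    // Append a NUL-terminated section name and return its offset, which is
    // what Elf32_Shdr::sh_name stores instead of the string itself.
    static uint32_t AddSectionName(std::string* shstrtab, const char* name) {
      uint32_t offset = static_cast<uint32_t>(shstrtab->size());
      shstrtab->append(name);
      shstrtab->push_back('\0');
      return offset;
    }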
@@ -271,6 +282,52 @@ bool ElfWriterQuick::Write(OatWriter& oat_writer,
LOG(INFO) << "shstrtab_size=" << shstrtab_size << std::hex << " " << shstrtab_size;
}
+ // Create debug information, if we have it.
+ bool generateDebugInformation = compiler_driver_->GetCallFrameInformation() != nullptr;
+ std::vector<uint8_t> dbg_info;
+ std::vector<uint8_t> dbg_abbrev;
+ std::vector<uint8_t> dbg_str;
+ if (generateDebugInformation) {
+ FillInCFIInformation(oat_writer, &dbg_info, &dbg_abbrev, &dbg_str);
+ }
+
+ uint32_t shdbg_info_alignment = 1;
+ uint32_t shdbg_info_offset = expected_offset;
+ uint32_t shdbg_info_size = dbg_info.size();
+ expected_offset += shdbg_info_size;
+ if (debug) {
+ LOG(INFO) << "shdbg_info_offset=" << shdbg_info_offset << std::hex << " " << shdbg_info_offset;
+ LOG(INFO) << "shdbg_info_size=" << shdbg_info_size << std::hex << " " << shdbg_info_size;
+ }
+
+ uint32_t shdbg_abbrev_alignment = 1;
+ uint32_t shdbg_abbrev_offset = expected_offset;
+ uint32_t shdbg_abbrev_size = dbg_abbrev.size();
+ expected_offset += shdbg_abbrev_size;
+ if (debug) {
+ LOG(INFO) << "shdbg_abbrev_offset=" << shdbg_abbrev_offset << std::hex << " " << shdbg_abbrev_offset;
+ LOG(INFO) << "shdbg_abbrev_size=" << shdbg_abbrev_size << std::hex << " " << shdbg_abbrev_size;
+ }
+
+ uint32_t shdbg_frm_alignment = 4;
+ uint32_t shdbg_frm_offset = expected_offset = RoundUp(expected_offset, shdbg_frm_alignment);
+ uint32_t shdbg_frm_size =
+ generateDebugInformation ? compiler_driver_->GetCallFrameInformation()->size() : 0;
+ expected_offset += shdbg_frm_size;
+ if (debug) {
+ LOG(INFO) << "shdbg_frm_offset=" << shdbg_frm_offset << std::hex << " " << shdbg_frm_offset;
+ LOG(INFO) << "shdbg_frm_size=" << shdbg_frm_size << std::hex << " " << shdbg_frm_size;
+ }
+
+ uint32_t shdbg_str_alignment = 1;
+ uint32_t shdbg_str_offset = expected_offset;
+ uint32_t shdbg_str_size = dbg_str.size();
+ expected_offset += shdbg_str_size;
+ if (debug) {
+ LOG(INFO) << "shdbg_str_offset=" << shdbg_str_offset << std::hex << " " << shdbg_str_offset;
+ LOG(INFO) << "shdbg_str_size=" << shdbg_str_size << std::hex << " " << shdbg_str_size;
+ }
+
// section headers (after all sections)
uint32_t shdr_alignment = sizeof(Elf32_Word);
uint32_t shdr_offset = expected_offset = RoundUp(expected_offset, shdr_alignment);
@@ -282,7 +339,11 @@ bool ElfWriterQuick::Write(OatWriter& oat_writer,
const uint8_t SH_TEXT = 5;
const uint8_t SH_DYNAMIC = 6;
const uint8_t SH_SHSTRTAB = 7;
- const uint8_t SH_NUM = 8;
+ const uint8_t SH_DBG_INFO = 8;
+ const uint8_t SH_DBG_ABRV = 9;
+ const uint8_t SH_DBG_FRM = 10;
+ const uint8_t SH_DBG_STR = 11;
+ const uint8_t SH_NUM = generateDebugInformation ? 12 : 8;
uint32_t shdr_size = sizeof(Elf32_Shdr) * SH_NUM;
expected_offset += shdr_size;
if (debug) {
@@ -559,6 +620,52 @@ bool ElfWriterQuick::Write(OatWriter& oat_writer,
section_headers[SH_SHSTRTAB].sh_addralign = shstrtab_alignment;
section_headers[SH_SHSTRTAB].sh_entsize = 0;
+ if (generateDebugInformation) {
+ section_headers[SH_DBG_INFO].sh_name = shstrtab_debug_info_offset;
+ section_headers[SH_DBG_INFO].sh_type = SHT_PROGBITS;
+ section_headers[SH_DBG_INFO].sh_flags = 0;
+ section_headers[SH_DBG_INFO].sh_addr = 0;
+ section_headers[SH_DBG_INFO].sh_offset = shdbg_info_offset;
+ section_headers[SH_DBG_INFO].sh_size = shdbg_info_size;
+ section_headers[SH_DBG_INFO].sh_link = 0;
+ section_headers[SH_DBG_INFO].sh_info = 0;
+ section_headers[SH_DBG_INFO].sh_addralign = shdbg_info_alignment;
+ section_headers[SH_DBG_INFO].sh_entsize = 0;
+
+ section_headers[SH_DBG_ABRV].sh_name = shstrtab_debug_abbrev_offset;
+ section_headers[SH_DBG_ABRV].sh_type = SHT_PROGBITS;
+ section_headers[SH_DBG_ABRV].sh_flags = 0;
+ section_headers[SH_DBG_ABRV].sh_addr = 0;
+ section_headers[SH_DBG_ABRV].sh_offset = shdbg_abbrev_offset;
+ section_headers[SH_DBG_ABRV].sh_size = shdbg_abbrev_size;
+ section_headers[SH_DBG_ABRV].sh_link = 0;
+ section_headers[SH_DBG_ABRV].sh_info = 0;
+ section_headers[SH_DBG_ABRV].sh_addralign = shdbg_abbrev_alignment;
+ section_headers[SH_DBG_ABRV].sh_entsize = 0;
+
+ section_headers[SH_DBG_FRM].sh_name = shstrtab_debug_frame_offset;
+ section_headers[SH_DBG_FRM].sh_type = SHT_PROGBITS;
+ section_headers[SH_DBG_FRM].sh_flags = 0;
+ section_headers[SH_DBG_FRM].sh_addr = 0;
+ section_headers[SH_DBG_FRM].sh_offset = shdbg_frm_offset;
+ section_headers[SH_DBG_FRM].sh_size = shdbg_frm_size;
+ section_headers[SH_DBG_FRM].sh_link = 0;
+ section_headers[SH_DBG_FRM].sh_info = 0;
+ section_headers[SH_DBG_FRM].sh_addralign = shdbg_frm_alignment;
+ section_headers[SH_DBG_FRM].sh_entsize = 0;
+
+ section_headers[SH_DBG_STR].sh_name = shstrtab_debug_str_offset;
+ section_headers[SH_DBG_STR].sh_type = SHT_PROGBITS;
+ section_headers[SH_DBG_STR].sh_flags = 0;
+ section_headers[SH_DBG_STR].sh_addr = 0;
+ section_headers[SH_DBG_STR].sh_offset = shdbg_str_offset;
+ section_headers[SH_DBG_STR].sh_size = shdbg_str_size;
+ section_headers[SH_DBG_STR].sh_link = 0;
+ section_headers[SH_DBG_STR].sh_info = 0;
+ section_headers[SH_DBG_STR].sh_addralign = shdbg_str_alignment;
+ section_headers[SH_DBG_STR].sh_entsize = 0;
+ }
+
// phase 3: writing file
// Elf32_Ehdr
@@ -622,13 +729,13 @@ bool ElfWriterQuick::Write(OatWriter& oat_writer,
return false;
}
BufferedOutputStream output_stream(new FileOutputStream(elf_file_));
- if (!oat_writer.Write(output_stream)) {
+ if (!oat_writer->Write(&output_stream)) {
PLOG(ERROR) << "Failed to write .rodata and .text for " << elf_file_->GetPath();
return false;
}
// .dynamic
- DCHECK_LE(oat_data_offset + oat_writer.GetSize(), dynamic_offset);
+ DCHECK_LE(oat_data_offset + oat_writer->GetSize(), dynamic_offset);
if (static_cast<off_t>(dynamic_offset) != lseek(elf_file_->Fd(), dynamic_offset, SEEK_SET)) {
PLOG(ERROR) << "Failed to seek to .dynamic offset " << dynamic_offset
<< " for " << elf_file_->GetPath();
@@ -651,8 +758,62 @@ bool ElfWriterQuick::Write(OatWriter& oat_writer,
return false;
}
+ if (generateDebugInformation) {
+ // .debug_info
+ DCHECK_LE(shstrtab_offset + shstrtab_size, shdbg_info_offset);
+ if (static_cast<off_t>(shdbg_info_offset) != lseek(elf_file_->Fd(), shdbg_info_offset, SEEK_SET)) {
+ PLOG(ERROR) << "Failed to seek to .shdbg_info offset " << shdbg_info_offset
+ << " for " << elf_file_->GetPath();
+ return false;
+ }
+ if (!elf_file_->WriteFully(&dbg_info[0], shdbg_info_size)) {
+ PLOG(ERROR) << "Failed to write .debug_info for " << elf_file_->GetPath();
+ return false;
+ }
+
+ // .debug_abbrev
+ DCHECK_LE(shdbg_info_offset + shdbg_info_size, shdbg_abbrev_offset);
+ if (static_cast<off_t>(shdbg_abbrev_offset) != lseek(elf_file_->Fd(), shdbg_abbrev_offset, SEEK_SET)) {
+ PLOG(ERROR) << "Failed to seek to .shdbg_abbrev offset " << shdbg_abbrev_offset
+ << " for " << elf_file_->GetPath();
+ return false;
+ }
+ if (!elf_file_->WriteFully(&dbg_abbrev[0], shdbg_abbrev_size)) {
+ PLOG(ERROR) << "Failed to write .debug_abbrev for " << elf_file_->GetPath();
+ return false;
+ }
+
+ // .debug_frame
+ DCHECK_LE(shdbg_abbrev_offset + shdbg_abbrev_size, shdbg_frm_offset);
+ if (static_cast<off_t>(shdbg_frm_offset) != lseek(elf_file_->Fd(), shdbg_frm_offset, SEEK_SET)) {
+ PLOG(ERROR) << "Failed to seek to .shdbg_frm offset " << shdbg_frm_offset
+ << " for " << elf_file_->GetPath();
+ return false;
+ }
+ if (!elf_file_->WriteFully(&((*compiler_driver_->GetCallFrameInformation())[0]), shdbg_frm_size)) {
+ PLOG(ERROR) << "Failed to write .debug_frame for " << elf_file_->GetPath();
+ return false;
+ }
+
+ // .debug_str
+ DCHECK_LE(shdbg_frm_offset + shdbg_frm_size, shdbg_str_offset);
+ if (static_cast<off_t>(shdbg_str_offset) != lseek(elf_file_->Fd(), shdbg_str_offset, SEEK_SET)) {
+ PLOG(ERROR) << "Failed to seek to .shdbg_str offset " << shdbg_str_offset
+ << " for " << elf_file_->GetPath();
+ return false;
+ }
+ if (!elf_file_->WriteFully(&dbg_str[0], shdbg_str_size)) {
+ PLOG(ERROR) << "Failed to write .debug_str for " << elf_file_->GetPath();
+ return false;
+ }
+ }
+
// section headers (after all sections)
- DCHECK_LE(shstrtab_offset + shstrtab_size, shdr_offset);
+ if (generateDebugInformation) {
+ DCHECK_LE(shdbg_str_offset + shdbg_str_size, shdr_offset);
+ } else {
+ DCHECK_LE(shstrtab_offset + shstrtab_size, shdr_offset);
+ }
if (static_cast<off_t>(shdr_offset) != lseek(elf_file_->Fd(), shdr_offset, SEEK_SET)) {
PLOG(ERROR) << "Failed to seek to ELF section headers offset " << shdr_offset
<< " for " << elf_file_->GetPath();
@@ -665,6 +826,164 @@ bool ElfWriterQuick::Write(OatWriter& oat_writer,
VLOG(compiler) << "ELF file written successfully: " << elf_file_->GetPath();
return true;
+} // NOLINT(readability/fn_size)
+
+static void UpdateWord(std::vector<uint8_t>* buf, int offset, int data) {
+ (*buf)[offset+0] = data;
+ (*buf)[offset+1] = data >> 8;
+ (*buf)[offset+2] = data >> 16;
+ (*buf)[offset+3] = data >> 24;
+}
+
+static void PushWord(std::vector<uint8_t>* buf, int data) {
+ buf->push_back(data & 0xff);
+ buf->push_back((data >> 8) & 0xff);
+ buf->push_back((data >> 16) & 0xff);
+ buf->push_back((data >> 24) & 0xff);
+}
+
+static void PushHalf(std::vector<uint8_t>* buf, int data) {
+ buf->push_back(data & 0xff);
+ buf->push_back((data >> 8) & 0xff);
+}
+
+// DWARF constants needed to generate CFI information.
+enum {
+ // Tag encodings.
+ DW_TAG_compile_unit = 0x11,
+ DW_TAG_subprogram = 0x2e,
+
+ // Attribute encodings.
+ DW_AT_name = 0x03,
+ DW_AT_low_pc = 0x11,
+ DW_AT_high_pc = 0x12,
+ DW_AT_language = 0x13,
+
+ // Constant encoding.
+ DW_CHILDREN_no = 0x00,
+ DW_CHILDREN_yes = 0x01,
+
+ // Attribute form encodings.
+ DW_FORM_addr = 0x01,
+ DW_FORM_data1 = 0x0b,
+ DW_FORM_strp = 0x0e,
+
+ // Language encoding.
+ DW_LANG_Java = 0x000b
+};
+
+void ElfWriterQuick::FillInCFIInformation(OatWriter* oat_writer,
+ std::vector<uint8_t>* dbg_info,
+ std::vector<uint8_t>* dbg_abbrev,
+ std::vector<uint8_t>* dbg_str) {
+ // Create the debug_abbrev section with boilerplate information.
+ // We only care about low_pc and high_pc right now for the compilation
+ // unit and methods.
+
+ // Tag 1: Compilation unit: DW_TAG_compile_unit.
+ dbg_abbrev->push_back(1);
+ dbg_abbrev->push_back(DW_TAG_compile_unit);
+
+ // There are children (the methods).
+ dbg_abbrev->push_back(DW_CHILDREN_yes);
+
+ // DW_AT_language DW_FORM_data1.
+ dbg_abbrev->push_back(DW_AT_language);
+ dbg_abbrev->push_back(DW_FORM_data1);
+
+ // DW_AT_low_pc DW_FORM_addr.
+ dbg_abbrev->push_back(DW_AT_low_pc);
+ dbg_abbrev->push_back(DW_FORM_addr);
+
+ // DW_AT_high_pc DW_FORM_addr.
+ dbg_abbrev->push_back(DW_AT_high_pc);
+ dbg_abbrev->push_back(DW_FORM_addr);
+
+ // End of DW_TAG_compile_unit.
+ PushHalf(dbg_abbrev, 0);
+
+ // Tag 2: Subprogram: DW_TAG_subprogram.
+ dbg_abbrev->push_back(2);
+ dbg_abbrev->push_back(DW_TAG_subprogram);
+
+ // There are no children.
+ dbg_abbrev->push_back(DW_CHILDREN_no);
+
+ // Name of the method.
+ dbg_abbrev->push_back(DW_AT_name);
+ dbg_abbrev->push_back(DW_FORM_strp);
+
+ // DW_AT_low_pc DW_FORM_addr.
+ dbg_abbrev->push_back(DW_AT_low_pc);
+ dbg_abbrev->push_back(DW_FORM_addr);
+
+ // DW_AT_high_pc DW_FORM_addr.
+ dbg_abbrev->push_back(DW_AT_high_pc);
+ dbg_abbrev->push_back(DW_FORM_addr);
+
+ // End of DW_TAG_subprogram.
+ PushHalf(dbg_abbrev, 0);
+
+ // Start the debug_info section with the header information.
+ // 'unit_length' will be filled in later.
+ PushWord(dbg_info, 0);
+
+ // 'version': 3.
+ PushHalf(dbg_info, 3);
+
+ // Offset into .debug_abbrev section (always 0).
+ PushWord(dbg_info, 0);
+
+ // Address size: 4.
+ dbg_info->push_back(4);
+
+ // Start the description for the compilation unit.
+ // This uses tag 1.
+ dbg_info->push_back(1);
+
+ // The language is Java.
+ dbg_info->push_back(DW_LANG_Java);
+
+ // Leave space for low_pc and high_pc.
+ int low_pc_offset = dbg_info->size();
+ PushWord(dbg_info, 0);
+ PushWord(dbg_info, 0);
+
+ // Walk through the information in the method table and enter it into dbg_info.
+ const std::vector<OatWriter::DebugInfo>& dbg = oat_writer->GetCFIMethodInfo();
+ uint32_t low_pc = 0xFFFFFFFFU;
+ uint32_t high_pc = 0;
+
+ for (uint32_t i = 0; i < dbg.size(); i++) {
+ const OatWriter::DebugInfo& info = dbg[i];
+ if (info.low_pc_ < low_pc) {
+ low_pc = info.low_pc_;
+ }
+ if (info.high_pc_ > high_pc) {
+ high_pc = info.high_pc_;
+ }
+
+ // Start a new TAG: subroutine (2).
+ dbg_info->push_back(2);
+
+ // Enter the name into the string table (and NUL terminate).
+ uint32_t str_offset = dbg_str->size();
+ dbg_str->insert(dbg_str->end(), info.method_name_.begin(), info.method_name_.end());
+ dbg_str->push_back('\0');
+
+ // Enter name, low_pc, high_pc.
+ PushWord(dbg_info, str_offset);
+ PushWord(dbg_info, info.low_pc_);
+ PushWord(dbg_info, info.high_pc_);
+ }
+
+ // One-byte terminator for the compilation unit's children.
+ dbg_info->push_back(0);
+
+ // We have now walked all the methods. Fill in lengths and low/high PCs.
+ UpdateWord(dbg_info, 0, dbg_info->size() - 4);
+ UpdateWord(dbg_info, low_pc_offset, low_pc);
+ UpdateWord(dbg_info, low_pc_offset + 4, high_pc);
}
} // namespace art
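
For reference, the fixed-size header FillInCFIInformation emits at the top of .debug_info is eleven bytes in 32-bit DWARF 3. A standalone check of that layout; the PushWord/PushHalf helpers here mirror the ones in the hunk above:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    static void PushWord(std::vector<uint8_t>* buf, uint32_t data) {
      for (int shift = 0; shift < 32; shift += 8)
        buf->push_back((data >> shift) & 0xff);  // Little-endian bytes.
    }
    static void PushHalf(std::vector<uint8_t>* buf, uint16_t data) {
      buf->push_back(data & 0xff);
      buf->push_back((data >> 8) & 0xff);
    }

    int main() {
      std::vector<uint8_t> dbg_info;
      PushWord(&dbg_info, 0);  // unit_length placeholder, patched later.
      PushHalf(&dbg_info, 3);  // DWARF version 3.
      PushWord(&dbg_info, 0);  // Offset into .debug_abbrev.
      dbg_info.push_back(4);   // Address size: 4 bytes.
      assert(dbg_info.size() == 11);  // Fixed header size before the first DIE.
      return 0;
    }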
diff --git a/compiler/elf_writer_quick.h b/compiler/elf_writer_quick.h
index f36d06f79d..dec75dc83f 100644
--- a/compiler/elf_writer_quick.h
+++ b/compiler/elf_writer_quick.h
@@ -21,11 +21,11 @@
namespace art {
-class ElfWriterQuick : public ElfWriter {
+class ElfWriterQuick FINAL : public ElfWriter {
public:
// Write an ELF file. Returns true on success, false on failure.
static bool Create(File* file,
- OatWriter& oat_writer,
+ OatWriter* oat_writer,
const std::vector<const DexFile*>& dex_files,
const std::string& android_root,
bool is_host,
@@ -33,15 +33,27 @@ class ElfWriterQuick : public ElfWriter {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
protected:
- virtual bool Write(OatWriter& oat_writer,
- const std::vector<const DexFile*>& dex_files,
- const std::string& android_root,
- bool is_host)
+ bool Write(OatWriter* oat_writer,
+ const std::vector<const DexFile*>& dex_files,
+ const std::string& android_root,
+ bool is_host)
+ OVERRIDE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
- ElfWriterQuick(const CompilerDriver& driver, File* elf_file);
- ~ElfWriterQuick();
+ ElfWriterQuick(const CompilerDriver& driver, File* elf_file)
+ : ElfWriter(driver, elf_file) {}
+ ~ElfWriterQuick() {}
+
+ /*
+ * @brief Generate the DWARF debug_info and debug_abbrev sections
+ * @param oat_writer The Oat file Writer.
+ * @param dbg_info Compilation unit information.
+ * @param dbg_abbrev Abbreviations used to generate dbg_info.
+ * @param dbg_str Debug strings.
+ */
+ void FillInCFIInformation(OatWriter* oat_writer, std::vector<uint8_t>* dbg_info,
+ std::vector<uint8_t>* dbg_abbrev, std::vector<uint8_t>* dbg_str);
DISALLOW_IMPLICIT_CONSTRUCTORS(ElfWriterQuick);
};
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index 16e2aa208a..619b056f57 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -49,15 +49,15 @@ TEST_F(ImageTest, WriteRead) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
TimingLogger timings("ImageTest::WriteRead", false, false);
timings.StartSplit("CompileAll");
-#if defined(ART_USE_PORTABLE_COMPILER)
- // TODO: we disable this for portable so the test executes in a reasonable amount of time.
- // We shouldn't need to do this.
- runtime_->SetCompilerFilter(Runtime::kInterpretOnly);
-#endif
+ if (kUsePortableCompiler) {
+ // TODO: we disable this for portable so the test executes in a reasonable amount of time.
+ // We shouldn't need to do this.
+ compiler_options_->SetCompilerFilter(CompilerOptions::kInterpretOnly);
+ }
for (const DexFile* dex_file : class_linker->GetBootClassPath()) {
dex_file->EnableWrite();
}
- compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), timings);
+ compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), &timings);
ScopedObjectAccess soa(Thread::Current());
OatWriter oat_writer(class_linker->GetBootClassPath(),
@@ -65,7 +65,7 @@ TEST_F(ImageTest, WriteRead) {
bool success = compiler_driver_->WriteElf(GetTestAndroidRoot(),
!kIsTargetBuild,
class_linker->GetBootClassPath(),
- oat_writer,
+ &oat_writer,
tmp_elf.GetFile());
ASSERT_TRUE(success);
timings.EndSplit();
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index aa16885039..964cfe99b2 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -104,6 +104,8 @@ bool ImageWriter::Write(const std::string& image_filename,
portable_to_interpreter_bridge_offset_ =
oat_file_->GetOatHeader().GetPortableToInterpreterBridgeOffset();
+ quick_generic_jni_trampoline_offset_ =
+ oat_file_->GetOatHeader().GetQuickGenericJniTrampolineOffset();
quick_imt_conflict_trampoline_offset_ =
oat_file_->GetOatHeader().GetQuickImtConflictTrampolineOffset();
quick_resolution_trampoline_offset_ =
@@ -638,7 +640,12 @@ void ImageWriter::FixupMethod(ArtMethod* orig, ArtMethod* copy) {
if (quick_code != nullptr) {
copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(quick_code);
} else {
- copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(GetOatAddress(quick_resolution_trampoline_offset_));
+ if (orig->IsNative() && !orig->IsStatic()) {
+ // Non-static native method missing compiled code: use the generic JNI version.
+ copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(GetOatAddress(quick_generic_jni_trampoline_offset_));
+ } else {
+ copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(GetOatAddress(quick_resolution_trampoline_offset_));
+ }
}
const byte* portable_code = GetOatAddress(orig->GetPortableOatCodeOffset());
if (portable_code != nullptr) {
@@ -807,6 +814,12 @@ void ImageWriter::PatchOatCodeAndMethods() {
uintptr_t value = quick_code - patch_location + patch->RelativeOffset();
SetPatchLocation(patch, value);
} else {
+ // Generic JNI, not the interpreter bridge from GetQuickOatCodeFor().
+ if (target->IsNative() &&
+ quick_code == reinterpret_cast<uintptr_t>(GetQuickToInterpreterBridge())) {
+ code_offset = quick_generic_jni_trampoline_offset_;
+ }
+
SetPatchLocation(patch, PointerToLowMemUInt32(GetOatAddress(code_offset)));
}
}
@@ -845,7 +858,7 @@ void ImageWriter::SetPatchLocation(const CompilerDriver::PatchInformation* patch
if (patch->IsCall()) {
const CompilerDriver::CallPatchInformation* cpatch = patch->AsCall();
const DexFile::MethodId& id = cpatch->GetDexFile().GetMethodId(cpatch->GetTargetMethodIdx());
- uintptr_t expected = reinterpret_cast<uintptr_t>(&id);
+ uint32_t expected = reinterpret_cast<uintptr_t>(&id) & 0xFFFFFFFF;
uint32_t actual = *patch_location;
CHECK(actual == expected || actual == value) << std::hex
<< "actual=" << actual
@@ -855,7 +868,7 @@ void ImageWriter::SetPatchLocation(const CompilerDriver::PatchInformation* patch
if (patch->IsType()) {
const CompilerDriver::TypePatchInformation* tpatch = patch->AsType();
const DexFile::TypeId& id = tpatch->GetDexFile().GetTypeId(tpatch->GetTargetTypeIdx());
- uintptr_t expected = reinterpret_cast<uintptr_t>(&id);
+ uint32_t expected = reinterpret_cast<uintptr_t>(&id) & 0xFFFFFFFF;
uint32_t actual = *patch_location;
CHECK(actual == expected || actual == value) << std::hex
<< "actual=" << actual
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index a1504eeca8..dff33bad1e 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -41,8 +41,8 @@ class ImageWriter {
: compiler_driver_(compiler_driver), oat_file_(NULL), image_end_(0), image_begin_(NULL),
oat_data_begin_(NULL), interpreter_to_interpreter_bridge_offset_(0),
interpreter_to_compiled_code_bridge_offset_(0), portable_imt_conflict_trampoline_offset_(0),
- portable_resolution_trampoline_offset_(0), quick_imt_conflict_trampoline_offset_(0),
- quick_resolution_trampoline_offset_(0) {}
+ portable_resolution_trampoline_offset_(0), quick_generic_jni_trampoline_offset_(0),
+ quick_imt_conflict_trampoline_offset_(0), quick_resolution_trampoline_offset_(0) {}
~ImageWriter() {}
@@ -195,6 +195,7 @@ class ImageWriter {
uint32_t portable_imt_conflict_trampoline_offset_;
uint32_t portable_resolution_trampoline_offset_;
uint32_t portable_to_interpreter_bridge_offset_;
+ uint32_t quick_generic_jni_trampoline_offset_;
uint32_t quick_imt_conflict_trampoline_offset_;
uint32_t quick_resolution_trampoline_offset_;
uint32_t quick_to_interpreter_bridge_offset_;
diff --git a/compiler/llvm/compiler_llvm.cc b/compiler/llvm/compiler_llvm.cc
index a5acd2a332..4ce714a183 100644
--- a/compiler/llvm/compiler_llvm.cc
+++ b/compiler/llvm/compiler_llvm.cc
@@ -175,16 +175,16 @@ CompileNativeMethod(DexCompilationUnit* dex_compilation_unit) {
} // namespace llvm
} // namespace art
-inline static art::llvm::CompilerLLVM* ContextOf(art::CompilerDriver& driver) {
+static art::llvm::CompilerLLVM* ContextOf(art::CompilerDriver& driver) {
void *compiler_context = driver.GetCompilerContext();
CHECK(compiler_context != NULL);
return reinterpret_cast<art::llvm::CompilerLLVM*>(compiler_context);
}
-inline static const art::llvm::CompilerLLVM* ContextOf(const art::CompilerDriver& driver) {
+static art::llvm::CompilerLLVM* ContextOf(const art::CompilerDriver& driver) {
void *compiler_context = driver.GetCompilerContext();
CHECK(compiler_context != NULL);
- return reinterpret_cast<const art::llvm::CompilerLLVM*>(compiler_context);
+ return reinterpret_cast<art::llvm::CompilerLLVM*>(compiler_context);
}
extern "C" void ArtInitCompilerContext(art::CompilerDriver& driver) {
@@ -233,7 +233,7 @@ extern "C" art::CompiledMethod* ArtLLVMJniCompileMethod(art::CompilerDriver& dri
return result;
}
-extern "C" void compilerLLVMSetBitcodeFileName(art::CompilerDriver& driver,
- std::string const& filename) {
+extern "C" void compilerLLVMSetBitcodeFileName(const art::CompilerDriver& driver,
+ const std::string& filename) {
ContextOf(driver)->SetBitcodeFileName(filename);
}
diff --git a/compiler/llvm/compiler_llvm.h b/compiler/llvm/compiler_llvm.h
index 65bc16bcd8..c2211fb92c 100644
--- a/compiler/llvm/compiler_llvm.h
+++ b/compiler/llvm/compiler_llvm.h
@@ -70,7 +70,7 @@ class CompilerLLVM {
return insn_set_;
}
- void SetBitcodeFileName(std::string const& filename) {
+ void SetBitcodeFileName(const std::string& filename) {
bitcode_filename_ = filename;
}
diff --git a/compiler/llvm/gbc_expander.cc b/compiler/llvm/gbc_expander.cc
index 8f22a97968..cf28db3bfc 100644
--- a/compiler/llvm/gbc_expander.cc
+++ b/compiler/llvm/gbc_expander.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "dex_file.h"
#include "dex_file-inl.h"
#include "driver/compiler_driver.h"
#include "driver/dex_compilation_unit.h"
@@ -1602,7 +1603,7 @@ llvm::Value* GBCExpanderPass::Expand_HLIGet(llvm::CallInst& call_inst,
llvm::Value* field_value;
- int field_offset;
+ art::MemberOffset field_offset(0u);
bool is_volatile;
bool is_fast_path = driver_->ComputeInstanceFieldInfo(
field_idx, dex_compilation_unit_, false, &field_offset, &is_volatile);
@@ -1633,12 +1634,12 @@ llvm::Value* GBCExpanderPass::Expand_HLIGet(llvm::CallInst& call_inst,
field_value = irb_.CreateBitCast(field_value, irb_.getJType(field_jty));
}
} else {
- DCHECK_GE(field_offset, 0);
+ DCHECK_GE(field_offset.Int32Value(), 0);
llvm::PointerType* field_type =
irb_.getJType(field_jty)->getPointerTo();
- llvm::ConstantInt* field_offset_value = irb_.getPtrEquivInt(field_offset);
+ llvm::ConstantInt* field_offset_value = irb_.getPtrEquivInt(field_offset.Int32Value());
llvm::Value* field_addr =
irb_.CreatePtrDisp(object_addr, field_offset_value, field_type);
@@ -1664,7 +1665,7 @@ void GBCExpanderPass::Expand_HLIPut(llvm::CallInst& call_inst,
EmitGuard_NullPointerException(dex_pc, object_addr, opt_flags);
- int field_offset;
+ art::MemberOffset field_offset(0u);
bool is_volatile;
bool is_fast_path = driver_->ComputeInstanceFieldInfo(
field_idx, dex_compilation_unit_, true, &field_offset, &is_volatile);
@@ -1698,7 +1699,7 @@ void GBCExpanderPass::Expand_HLIPut(llvm::CallInst& call_inst,
EmitGuard_ExceptionLandingPad(dex_pc);
} else {
- DCHECK_GE(field_offset, 0);
+ DCHECK_GE(field_offset.Int32Value(), 0);
if (is_volatile) {
irb_.CreateMemoryBarrier(art::kStoreStore);
@@ -1707,7 +1708,7 @@ void GBCExpanderPass::Expand_HLIPut(llvm::CallInst& call_inst,
llvm::PointerType* field_type =
irb_.getJType(field_jty)->getPointerTo();
- llvm::Value* field_offset_value = irb_.getPtrEquivInt(field_offset);
+ llvm::Value* field_offset_value = irb_.getPtrEquivInt(field_offset.Int32Value());
llvm::Value* field_addr =
irb_.CreatePtrDisp(object_addr, field_offset_value, field_type);
@@ -1875,8 +1876,8 @@ llvm::Value* GBCExpanderPass::Expand_HLSget(llvm::CallInst& call_inst,
uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
uint32_t field_idx = LV2UInt(call_inst.getArgOperand(0));
- int field_offset;
- int ssb_index;
+ art::MemberOffset field_offset(0u);
+ uint32_t ssb_index;
bool is_referrers_class;
bool is_volatile;
bool is_initialized;
@@ -1913,7 +1914,7 @@ llvm::Value* GBCExpanderPass::Expand_HLSget(llvm::CallInst& call_inst,
static_field_value = irb_.CreateBitCast(static_field_value, irb_.getJType(field_jty));
}
} else {
- DCHECK_GE(field_offset, 0);
+ DCHECK_GE(field_offset.Int32Value(), 0);
llvm::Value* static_storage_addr = NULL;
@@ -1929,11 +1930,11 @@ llvm::Value* GBCExpanderPass::Expand_HLSget(llvm::CallInst& call_inst,
} else {
// Medium path, static storage base in a different class which
// requires checks that the other class is initialized
- DCHECK_GE(ssb_index, 0);
+ DCHECK_NE(ssb_index, art::DexFile::kDexNoIndex);
static_storage_addr = EmitLoadStaticStorage(dex_pc, ssb_index);
}
- llvm::Value* static_field_offset_value = irb_.getPtrEquivInt(field_offset);
+ llvm::Value* static_field_offset_value = irb_.getPtrEquivInt(field_offset.Int32Value());
llvm::Value* static_field_addr =
irb_.CreatePtrDisp(static_storage_addr, static_field_offset_value,
@@ -1960,8 +1961,8 @@ void GBCExpanderPass::Expand_HLSput(llvm::CallInst& call_inst,
new_value = irb_.CreateBitCast(new_value, irb_.getJType(field_jty));
}
- int field_offset;
- int ssb_index;
+ art::MemberOffset field_offset(0u);
+ uint32_t ssb_index;
bool is_referrers_class;
bool is_volatile;
bool is_initialized;
@@ -1999,7 +2000,7 @@ void GBCExpanderPass::Expand_HLSput(llvm::CallInst& call_inst,
EmitGuard_ExceptionLandingPad(dex_pc);
} else {
- DCHECK_GE(field_offset, 0);
+ DCHECK_GE(field_offset.Int32Value(), 0);
llvm::Value* static_storage_addr = NULL;
@@ -2015,7 +2016,7 @@ void GBCExpanderPass::Expand_HLSput(llvm::CallInst& call_inst,
} else {
// Medium path, static storage base in a different class which
// requires checks that the other class is initialized
- DCHECK_GE(ssb_index, 0);
+ DCHECK_NE(ssb_index, art::DexFile::kDexNoIndex);
static_storage_addr = EmitLoadStaticStorage(dex_pc, ssb_index);
}
@@ -2023,7 +2024,7 @@ void GBCExpanderPass::Expand_HLSput(llvm::CallInst& call_inst,
irb_.CreateMemoryBarrier(art::kStoreStore);
}
- llvm::Value* static_field_offset_value = irb_.getPtrEquivInt(field_offset);
+ llvm::Value* static_field_offset_value = irb_.getPtrEquivInt(field_offset.Int32Value());
llvm::Value* static_field_addr =
irb_.CreatePtrDisp(static_storage_addr, static_field_offset_value,
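
The gbc_expander hunks replace raw int field offsets and signed indices with art::MemberOffset and uint32_t plus the kDexNoIndex sentinel. A sketch of why the wrapper helps; this mirrors MemberOffset in spirit only, the real class lives in ART's runtime:

    #include <cstdint>

    // Wrapping the raw field offset in a small value type prevents mixing
    // it up with other integers at call sites.
    class MemberOffset {
     public:
      explicit MemberOffset(uint32_t value) : value_(value) {}
      int32_t Int32Value() const { return static_cast<int32_t>(value_); }
     private:
      uint32_t value_;
    };

    // Unsigned index with an explicit "no index" sentinel, replacing the old
    // "negative means invalid" convention; kDexNoIndex is ART's sentinel name.
    static constexpr uint32_t kDexNoIndex = 0xFFFFFFFF;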
diff --git a/compiler/llvm/llvm_compilation_unit.cc b/compiler/llvm/llvm_compilation_unit.cc
index d23706d9f4..1d027f9d3b 100644
--- a/compiler/llvm/llvm_compilation_unit.cc
+++ b/compiler/llvm/llvm_compilation_unit.cc
@@ -199,7 +199,8 @@ bool LlvmCompilationUnit::MaterializeToRawOStream(::llvm::raw_ostream& out_strea
std::string target_triple;
std::string target_cpu;
std::string target_attr;
- CompilerDriver::InstructionSetToLLVMTarget(GetInstructionSet(), target_triple, target_cpu, target_attr);
+ CompilerDriver::InstructionSetToLLVMTarget(GetInstructionSet(), &target_triple, &target_cpu,
+ &target_attr);
std::string errmsg;
const ::llvm::Target* target =
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 55a962f7fd..93c35022f2 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -105,7 +105,7 @@ TEST_F(OatTest, WriteRead) {
jobject class_loader = NULL;
if (kCompile) {
TimingLogger timings("OatTest::WriteRead", false, false);
- compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), timings);
+ compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), &timings);
}
ScopedObjectAccess soa(Thread::Current());
@@ -119,12 +119,12 @@ TEST_F(OatTest, WriteRead) {
bool success = compiler_driver_->WriteElf(GetTestAndroidRoot(),
!kIsTargetBuild,
class_linker->GetBootClassPath(),
- oat_writer,
+ &oat_writer,
tmp.GetFile());
ASSERT_TRUE(success);
if (kCompile) { // OatWriter strips the code, regenerate to compare
- compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), timings);
+ compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), &timings);
}
std::string error_msg;
UniquePtr<OatFile> oat_file(OatFile::Open(tmp.GetFilename(), tmp.GetFilename(), NULL, false,
@@ -175,7 +175,7 @@ TEST_F(OatTest, WriteRead) {
TEST_F(OatTest, OatHeaderSizeCheck) {
// If this test is failing and you have to update these constants,
// it is time to update OatHeader::kOatVersion.
- EXPECT_EQ(76U, sizeof(OatHeader));
+ EXPECT_EQ(80U, sizeof(OatHeader));
EXPECT_EQ(28U, sizeof(OatMethodOffsets));
}
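
The expected OatHeader size grows from 76 to 80 bytes, which matches one new uint32_t field: the quick generic JNI trampoline offset added in oat_writer.cc below. A one-line sanity check of the delta, assuming that is the only header change:

    #include <cstdint>
    static_assert(80U == 76U + sizeof(uint32_t), "one new 4-byte offset field");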
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 7c5669a3ab..181240ead3 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -60,6 +60,7 @@ OatWriter::OatWriter(const std::vector<const DexFile*>& dex_files,
size_portable_imt_conflict_trampoline_(0),
size_portable_resolution_trampoline_(0),
size_portable_to_interpreter_bridge_(0),
+ size_quick_generic_jni_trampoline_(0),
size_quick_imt_conflict_trampoline_(0),
size_quick_resolution_trampoline_(0),
size_quick_to_interpreter_bridge_(0),
@@ -229,7 +230,7 @@ size_t OatWriter::InitOatClasses(size_t offset) {
oat_classes_.push_back(oat_class);
offset += oat_class->SizeOf();
}
- oat_dex_files_[i]->UpdateChecksum(*oat_header_);
+ oat_dex_files_[i]->UpdateChecksum(oat_header_);
}
return offset;
}
@@ -256,6 +257,7 @@ size_t OatWriter::InitOatCode(size_t offset) {
DO_TRAMPOLINE(portable_imt_conflict_trampoline_, PortableImtConflictTrampoline);
DO_TRAMPOLINE(portable_resolution_trampoline_, PortableResolutionTrampoline);
DO_TRAMPOLINE(portable_to_interpreter_bridge_, PortableToInterpreterBridge);
+ DO_TRAMPOLINE(quick_generic_jni_trampoline_, QuickGenericJniTrampoline);
DO_TRAMPOLINE(quick_imt_conflict_trampoline_, QuickImtConflictTrampoline);
DO_TRAMPOLINE(quick_resolution_trampoline_, QuickResolutionTrampoline);
DO_TRAMPOLINE(quick_to_interpreter_bridge_, QuickToInterpreterBridge);
@@ -268,6 +270,7 @@ size_t OatWriter::InitOatCode(size_t offset) {
oat_header_->SetPortableImtConflictTrampolineOffset(0);
oat_header_->SetPortableResolutionTrampolineOffset(0);
oat_header_->SetPortableToInterpreterBridgeOffset(0);
+ oat_header_->SetQuickGenericJniTrampolineOffset(0);
oat_header_->SetQuickImtConflictTrampolineOffset(0);
oat_header_->SetQuickResolutionTrampolineOffset(0);
oat_header_->SetQuickToInterpreterBridgeOffset(0);
@@ -293,7 +296,7 @@ size_t OatWriter::InitOatCodeDexFile(size_t offset,
class_def_index++, (*oat_class_index)++) {
const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
offset = InitOatCodeClassDef(offset, *oat_class_index, class_def_index, dex_file, class_def);
- oat_classes_[*oat_class_index]->UpdateChecksum(*oat_header_);
+ oat_classes_[*oat_class_index]->UpdateChecksum(oat_header_);
}
return offset;
}
@@ -378,6 +381,27 @@ size_t OatWriter::InitOatCodeMethod(size_t offset, size_t oat_class_index,
uint32_t thumb_offset = compiled_method->CodeDelta();
quick_code_offset = offset + sizeof(code_size) + thumb_offset;
+ std::vector<uint8_t>* cfi_info = compiler_driver_->GetCallFrameInformation();
+ if (cfi_info != nullptr) {
+ // Copy in the FDE, if present.
+ const std::vector<uint8_t>* fde = compiled_method->GetCFIInfo();
+ if (fde != nullptr) {
+ // Copy the information into cfi_info and then fix the address in the new copy.
+ int cur_offset = cfi_info->size();
+ cfi_info->insert(cfi_info->end(), fde->begin(), fde->end());
+
+ // Set the 'initial_location' field to address the start of the method.
+ uint32_t new_value = quick_code_offset - oat_header_->GetExecutableOffset();
+ uint32_t offset_to_update = cur_offset + 2 * sizeof(uint32_t);
+ (*cfi_info)[offset_to_update+0] = new_value;
+ (*cfi_info)[offset_to_update+1] = new_value >> 8;
+ (*cfi_info)[offset_to_update+2] = new_value >> 16;
+ (*cfi_info)[offset_to_update+3] = new_value >> 24;
+ method_info_.push_back(DebugInfo(PrettyMethod(class_def_method_index, dex_file, false),
+ new_value, new_value + code_size));
+ }
+ }
+
// Deduplicate code arrays
SafeMap<const std::vector<uint8_t>*, uint32_t>::iterator code_iter =
code_offsets_.find(quick_code);
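
The 2 * sizeof(uint32_t) skip above works because a 32-bit .debug_frame FDE begins with its length word and a CIE pointer before initial_location. A sketch of the patch as a standalone helper; PatchFdeInitialLocation is hypothetical:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Rewrite the FDE's initial_location field: skip length (4 bytes) and
    // CIE pointer (4 bytes), then store the PC little-endian.
    static void PatchFdeInitialLocation(std::vector<uint8_t>* cfi_info,
                                        size_t fde_offset, uint32_t pc) {
      size_t loc = fde_offset + 2 * sizeof(uint32_t);
      for (int shift = 0; shift < 32; shift += 8) {
        (*cfi_info)[loc++] = (pc >> shift) & 0xff;
      }
    }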
@@ -499,42 +523,42 @@ size_t OatWriter::InitOatCodeMethod(size_t offset, size_t oat_class_index,
}
#define DCHECK_OFFSET() \
- DCHECK_EQ(static_cast<off_t>(file_offset + relative_offset), out.Seek(0, kSeekCurrent)) \
+ DCHECK_EQ(static_cast<off_t>(file_offset + relative_offset), out->Seek(0, kSeekCurrent)) \
<< "file_offset=" << file_offset << " relative_offset=" << relative_offset
#define DCHECK_OFFSET_() \
- DCHECK_EQ(static_cast<off_t>(file_offset + offset_), out.Seek(0, kSeekCurrent)) \
+ DCHECK_EQ(static_cast<off_t>(file_offset + offset_), out->Seek(0, kSeekCurrent)) \
<< "file_offset=" << file_offset << " offset_=" << offset_
-bool OatWriter::Write(OutputStream& out) {
- const size_t file_offset = out.Seek(0, kSeekCurrent);
+bool OatWriter::Write(OutputStream* out) {
+ const size_t file_offset = out->Seek(0, kSeekCurrent);
- if (!out.WriteFully(oat_header_, sizeof(*oat_header_))) {
- PLOG(ERROR) << "Failed to write oat header to " << out.GetLocation();
+ if (!out->WriteFully(oat_header_, sizeof(*oat_header_))) {
+ PLOG(ERROR) << "Failed to write oat header to " << out->GetLocation();
return false;
}
size_oat_header_ += sizeof(*oat_header_);
- if (!out.WriteFully(image_file_location_.data(), image_file_location_.size())) {
- PLOG(ERROR) << "Failed to write oat header image file location to " << out.GetLocation();
+ if (!out->WriteFully(image_file_location_.data(), image_file_location_.size())) {
+ PLOG(ERROR) << "Failed to write oat header image file location to " << out->GetLocation();
return false;
}
size_oat_header_image_file_location_ += image_file_location_.size();
if (!WriteTables(out, file_offset)) {
- LOG(ERROR) << "Failed to write oat tables to " << out.GetLocation();
+ LOG(ERROR) << "Failed to write oat tables to " << out->GetLocation();
return false;
}
size_t relative_offset = WriteCode(out, file_offset);
if (relative_offset == 0) {
- LOG(ERROR) << "Failed to write oat code to " << out.GetLocation();
+ LOG(ERROR) << "Failed to write oat code to " << out->GetLocation();
return false;
}
relative_offset = WriteCodeDexFiles(out, file_offset, relative_offset);
if (relative_offset == 0) {
- LOG(ERROR) << "Failed to write oat code for dex files to " << out.GetLocation();
+ LOG(ERROR) << "Failed to write oat code for dex files to " << out->GetLocation();
return false;
}
@@ -555,6 +579,7 @@ bool OatWriter::Write(OutputStream& out) {
DO_STAT(size_portable_imt_conflict_trampoline_);
DO_STAT(size_portable_resolution_trampoline_);
DO_STAT(size_portable_to_interpreter_bridge_);
+ DO_STAT(size_quick_generic_jni_trampoline_);
DO_STAT(size_quick_imt_conflict_trampoline_);
DO_STAT(size_quick_resolution_trampoline_);
DO_STAT(size_quick_to_interpreter_bridge_);
@@ -577,26 +602,26 @@ bool OatWriter::Write(OutputStream& out) {
#undef DO_STAT
VLOG(compiler) << "size_total=" << PrettySize(size_total) << " (" << size_total << "B)"; \
- CHECK_EQ(file_offset + size_total, static_cast<uint32_t>(out.Seek(0, kSeekCurrent)));
+ CHECK_EQ(file_offset + size_total, static_cast<uint32_t>(out->Seek(0, kSeekCurrent)));
CHECK_EQ(size_, size_total);
}
- CHECK_EQ(file_offset + size_, static_cast<uint32_t>(out.Seek(0, kSeekCurrent)));
+ CHECK_EQ(file_offset + size_, static_cast<uint32_t>(out->Seek(0, kSeekCurrent)));
CHECK_EQ(size_, relative_offset);
return true;
}
-bool OatWriter::WriteTables(OutputStream& out, const size_t file_offset) {
+bool OatWriter::WriteTables(OutputStream* out, const size_t file_offset) {
for (size_t i = 0; i != oat_dex_files_.size(); ++i) {
if (!oat_dex_files_[i]->Write(this, out, file_offset)) {
- PLOG(ERROR) << "Failed to write oat dex information to " << out.GetLocation();
+ PLOG(ERROR) << "Failed to write oat dex information to " << out->GetLocation();
return false;
}
}
for (size_t i = 0; i != oat_dex_files_.size(); ++i) {
uint32_t expected_offset = file_offset + oat_dex_files_[i]->dex_file_offset_;
- off_t actual_offset = out.Seek(expected_offset, kSeekSet);
+ off_t actual_offset = out->Seek(expected_offset, kSeekSet);
if (static_cast<uint32_t>(actual_offset) != expected_offset) {
const DexFile* dex_file = (*dex_files_)[i];
PLOG(ERROR) << "Failed to seek to dex file section. Actual: " << actual_offset
@@ -604,29 +629,29 @@ bool OatWriter::WriteTables(OutputStream& out, const size_t file_offset) {
return false;
}
const DexFile* dex_file = (*dex_files_)[i];
- if (!out.WriteFully(&dex_file->GetHeader(), dex_file->GetHeader().file_size_)) {
+ if (!out->WriteFully(&dex_file->GetHeader(), dex_file->GetHeader().file_size_)) {
PLOG(ERROR) << "Failed to write dex file " << dex_file->GetLocation()
- << " to " << out.GetLocation();
+ << " to " << out->GetLocation();
return false;
}
size_dex_file_ += dex_file->GetHeader().file_size_;
}
for (size_t i = 0; i != oat_classes_.size(); ++i) {
if (!oat_classes_[i]->Write(this, out, file_offset)) {
- PLOG(ERROR) << "Failed to write oat methods information to " << out.GetLocation();
+ PLOG(ERROR) << "Failed to write oat methods information to " << out->GetLocation();
return false;
}
}
return true;
}
-size_t OatWriter::WriteCode(OutputStream& out, const size_t file_offset) {
+size_t OatWriter::WriteCode(OutputStream* out, const size_t file_offset) {
size_t relative_offset = oat_header_->GetExecutableOffset();
- off_t new_offset = out.Seek(size_executable_offset_alignment_, kSeekCurrent);
+ off_t new_offset = out->Seek(size_executable_offset_alignment_, kSeekCurrent);
size_t expected_file_offset = file_offset + relative_offset;
if (static_cast<uint32_t>(new_offset) != expected_file_offset) {
PLOG(ERROR) << "Failed to seek to oat code section. Actual: " << new_offset
- << " Expected: " << expected_file_offset << " File: " << out.GetLocation();
+ << " Expected: " << expected_file_offset << " File: " << out->GetLocation();
return 0;
}
DCHECK_OFFSET();
@@ -637,10 +662,10 @@ size_t OatWriter::WriteCode(OutputStream& out, const size_t file_offset) {
do { \
uint32_t aligned_offset = CompiledCode::AlignCode(relative_offset, instruction_set); \
uint32_t alignment_padding = aligned_offset - relative_offset; \
- out.Seek(alignment_padding, kSeekCurrent); \
+ out->Seek(alignment_padding, kSeekCurrent); \
size_trampoline_alignment_ += alignment_padding; \
- if (!out.WriteFully(&(*field)[0], field->size())) { \
- PLOG(ERROR) << "Failed to write " # field " to " << out.GetLocation(); \
+ if (!out->WriteFully(&(*field)[0], field->size())) { \
+ PLOG(ERROR) << "Failed to write " # field " to " << out->GetLocation(); \
return false; \
} \
size_ ## field += field->size(); \
@@ -654,6 +679,7 @@ size_t OatWriter::WriteCode(OutputStream& out, const size_t file_offset) {
DO_TRAMPOLINE(portable_imt_conflict_trampoline_);
DO_TRAMPOLINE(portable_resolution_trampoline_);
DO_TRAMPOLINE(portable_to_interpreter_bridge_);
+ DO_TRAMPOLINE(quick_generic_jni_trampoline_);
DO_TRAMPOLINE(quick_imt_conflict_trampoline_);
DO_TRAMPOLINE(quick_resolution_trampoline_);
DO_TRAMPOLINE(quick_to_interpreter_bridge_);
@@ -662,7 +688,7 @@ size_t OatWriter::WriteCode(OutputStream& out, const size_t file_offset) {
return relative_offset;
}
-size_t OatWriter::WriteCodeDexFiles(OutputStream& out,
+size_t OatWriter::WriteCodeDexFiles(OutputStream* out,
const size_t file_offset,
size_t relative_offset) {
size_t oat_class_index = 0;
@@ -678,7 +704,7 @@ size_t OatWriter::WriteCodeDexFiles(OutputStream& out,
return relative_offset;
}
-size_t OatWriter::WriteCodeDexFile(OutputStream& out, const size_t file_offset,
+size_t OatWriter::WriteCodeDexFile(OutputStream* out, const size_t file_offset,
size_t relative_offset, size_t* oat_class_index,
const DexFile& dex_file) {
for (size_t class_def_index = 0; class_def_index < dex_file.NumClassDefs();
@@ -694,12 +720,12 @@ size_t OatWriter::WriteCodeDexFile(OutputStream& out, const size_t file_offset,
}
void OatWriter::ReportWriteFailure(const char* what, uint32_t method_idx,
- const DexFile& dex_file, OutputStream& out) const {
+ const DexFile& dex_file, const OutputStream& out) const {
PLOG(ERROR) << "Failed to write " << what << " for " << PrettyMethod(method_idx, dex_file)
<< " to " << out.GetLocation();
}
-size_t OatWriter::WriteCodeClassDef(OutputStream& out,
+size_t OatWriter::WriteCodeClassDef(OutputStream* out,
const size_t file_offset,
size_t relative_offset,
size_t oat_class_index,
@@ -747,7 +773,7 @@ size_t OatWriter::WriteCodeClassDef(OutputStream& out,
return relative_offset;
}
-size_t OatWriter::WriteCodeMethod(OutputStream& out, const size_t file_offset,
+size_t OatWriter::WriteCodeMethod(OutputStream* out, const size_t file_offset,
size_t relative_offset, size_t oat_class_index,
size_t class_def_method_index, size_t* method_offsets_index,
bool is_static, uint32_t method_idx, const DexFile& dex_file) {
@@ -763,12 +789,12 @@ size_t OatWriter::WriteCodeMethod(OutputStream& out, const size_t file_offset,
uint32_t aligned_offset = compiled_method->AlignCode(relative_offset);
uint32_t aligned_code_delta = aligned_offset - relative_offset;
if (aligned_code_delta != 0) {
- off_t new_offset = out.Seek(aligned_code_delta, kSeekCurrent);
+ off_t new_offset = out->Seek(aligned_code_delta, kSeekCurrent);
size_code_alignment_ += aligned_code_delta;
uint32_t expected_offset = file_offset + aligned_offset;
if (static_cast<uint32_t>(new_offset) != expected_offset) {
PLOG(ERROR) << "Failed to seek to align oat code. Actual: " << new_offset
- << " Expected: " << expected_offset << " File: " << out.GetLocation();
+ << " Expected: " << expected_offset << " File: " << out->GetLocation();
return 0;
}
relative_offset += aligned_code_delta;
@@ -787,15 +813,15 @@ size_t OatWriter::WriteCodeMethod(OutputStream& out, const size_t file_offset,
<< PrettyMethod(method_idx, dex_file);
} else {
DCHECK(code_offset == method_offsets.code_offset_) << PrettyMethod(method_idx, dex_file);
- if (!out.WriteFully(&code_size, sizeof(code_size))) {
- ReportWriteFailure("method code size", method_idx, dex_file, out);
+ if (!out->WriteFully(&code_size, sizeof(code_size))) {
+ ReportWriteFailure("method code size", method_idx, dex_file, *out);
return 0;
}
size_code_size_ += sizeof(code_size);
relative_offset += sizeof(code_size);
DCHECK_OFFSET();
- if (!out.WriteFully(&(*quick_code)[0], code_size)) {
- ReportWriteFailure("method code", method_idx, dex_file, out);
+ if (!out->WriteFully(&(*quick_code)[0], code_size)) {
+ ReportWriteFailure("method code", method_idx, dex_file, *out);
return 0;
}
size_code_ += code_size;
@@ -818,8 +844,8 @@ size_t OatWriter::WriteCodeMethod(OutputStream& out, const size_t file_offset,
DCHECK((mapping_table_size == 0 && method_offsets.mapping_table_offset_ == 0)
|| relative_offset == method_offsets.mapping_table_offset_)
<< PrettyMethod(method_idx, dex_file);
- if (!out.WriteFully(&mapping_table[0], mapping_table_size)) {
- ReportWriteFailure("mapping table", method_idx, dex_file, out);
+ if (!out->WriteFully(&mapping_table[0], mapping_table_size)) {
+ ReportWriteFailure("mapping table", method_idx, dex_file, *out);
return 0;
}
size_mapping_table_ += mapping_table_size;
@@ -842,8 +868,8 @@ size_t OatWriter::WriteCodeMethod(OutputStream& out, const size_t file_offset,
DCHECK((vmap_table_size == 0 && method_offsets.vmap_table_offset_ == 0)
|| relative_offset == method_offsets.vmap_table_offset_)
<< PrettyMethod(method_idx, dex_file);
- if (!out.WriteFully(&vmap_table[0], vmap_table_size)) {
- ReportWriteFailure("vmap table", method_idx, dex_file, out);
+ if (!out->WriteFully(&vmap_table[0], vmap_table_size)) {
+ ReportWriteFailure("vmap table", method_idx, dex_file, *out);
return 0;
}
size_vmap_table_ += vmap_table_size;
@@ -866,8 +892,8 @@ size_t OatWriter::WriteCodeMethod(OutputStream& out, const size_t file_offset,
DCHECK((gc_map_size == 0 && method_offsets.gc_map_offset_ == 0)
|| relative_offset == method_offsets.gc_map_offset_)
<< PrettyMethod(method_idx, dex_file);
- if (!out.WriteFully(&gc_map[0], gc_map_size)) {
- ReportWriteFailure("GC map", method_idx, dex_file, out);
+ if (!out->WriteFully(&gc_map[0], gc_map_size)) {
+ ReportWriteFailure("GC map", method_idx, dex_file, *out);
return 0;
}
size_gc_map_ += gc_map_size;
@@ -897,42 +923,42 @@ size_t OatWriter::OatDexFile::SizeOf() const {
+ (sizeof(methods_offsets_[0]) * methods_offsets_.size());
}
-void OatWriter::OatDexFile::UpdateChecksum(OatHeader& oat_header) const {
- oat_header.UpdateChecksum(&dex_file_location_size_, sizeof(dex_file_location_size_));
- oat_header.UpdateChecksum(dex_file_location_data_, dex_file_location_size_);
- oat_header.UpdateChecksum(&dex_file_location_checksum_, sizeof(dex_file_location_checksum_));
- oat_header.UpdateChecksum(&dex_file_offset_, sizeof(dex_file_offset_));
- oat_header.UpdateChecksum(&methods_offsets_[0],
+void OatWriter::OatDexFile::UpdateChecksum(OatHeader* oat_header) const {
+ oat_header->UpdateChecksum(&dex_file_location_size_, sizeof(dex_file_location_size_));
+ oat_header->UpdateChecksum(dex_file_location_data_, dex_file_location_size_);
+ oat_header->UpdateChecksum(&dex_file_location_checksum_, sizeof(dex_file_location_checksum_));
+ oat_header->UpdateChecksum(&dex_file_offset_, sizeof(dex_file_offset_));
+ oat_header->UpdateChecksum(&methods_offsets_[0],
sizeof(methods_offsets_[0]) * methods_offsets_.size());
}
bool OatWriter::OatDexFile::Write(OatWriter* oat_writer,
- OutputStream& out,
+ OutputStream* out,
const size_t file_offset) const {
DCHECK_OFFSET_();
- if (!out.WriteFully(&dex_file_location_size_, sizeof(dex_file_location_size_))) {
- PLOG(ERROR) << "Failed to write dex file location length to " << out.GetLocation();
+ if (!out->WriteFully(&dex_file_location_size_, sizeof(dex_file_location_size_))) {
+ PLOG(ERROR) << "Failed to write dex file location length to " << out->GetLocation();
return false;
}
oat_writer->size_oat_dex_file_location_size_ += sizeof(dex_file_location_size_);
- if (!out.WriteFully(dex_file_location_data_, dex_file_location_size_)) {
- PLOG(ERROR) << "Failed to write dex file location data to " << out.GetLocation();
+ if (!out->WriteFully(dex_file_location_data_, dex_file_location_size_)) {
+ PLOG(ERROR) << "Failed to write dex file location data to " << out->GetLocation();
return false;
}
oat_writer->size_oat_dex_file_location_data_ += dex_file_location_size_;
- if (!out.WriteFully(&dex_file_location_checksum_, sizeof(dex_file_location_checksum_))) {
- PLOG(ERROR) << "Failed to write dex file location checksum to " << out.GetLocation();
+ if (!out->WriteFully(&dex_file_location_checksum_, sizeof(dex_file_location_checksum_))) {
+ PLOG(ERROR) << "Failed to write dex file location checksum to " << out->GetLocation();
return false;
}
oat_writer->size_oat_dex_file_location_checksum_ += sizeof(dex_file_location_checksum_);
- if (!out.WriteFully(&dex_file_offset_, sizeof(dex_file_offset_))) {
- PLOG(ERROR) << "Failed to write dex file offset to " << out.GetLocation();
+ if (!out->WriteFully(&dex_file_offset_, sizeof(dex_file_offset_))) {
+ PLOG(ERROR) << "Failed to write dex file offset to " << out->GetLocation();
return false;
}
oat_writer->size_oat_dex_file_offset_ += sizeof(dex_file_offset_);
- if (!out.WriteFully(&methods_offsets_[0],
+ if (!out->WriteFully(&methods_offsets_[0],
sizeof(methods_offsets_[0]) * methods_offsets_.size())) {
- PLOG(ERROR) << "Failed to write methods offsets to " << out.GetLocation();
+ PLOG(ERROR) << "Failed to write methods offsets to " << out->GetLocation();
return false;
}
oat_writer->size_oat_dex_file_methods_offsets_ +=
@@ -1020,48 +1046,48 @@ size_t OatWriter::OatClass::SizeOf() const {
+ (sizeof(method_offsets_[0]) * method_offsets_.size());
}
-void OatWriter::OatClass::UpdateChecksum(OatHeader& oat_header) const {
- oat_header.UpdateChecksum(&status_, sizeof(status_));
- oat_header.UpdateChecksum(&type_, sizeof(type_));
+void OatWriter::OatClass::UpdateChecksum(OatHeader* oat_header) const {
+ oat_header->UpdateChecksum(&status_, sizeof(status_));
+ oat_header->UpdateChecksum(&type_, sizeof(type_));
if (method_bitmap_size_ != 0) {
CHECK_EQ(kOatClassSomeCompiled, type_);
- oat_header.UpdateChecksum(&method_bitmap_size_, sizeof(method_bitmap_size_));
- oat_header.UpdateChecksum(method_bitmap_->GetRawStorage(), method_bitmap_size_);
+ oat_header->UpdateChecksum(&method_bitmap_size_, sizeof(method_bitmap_size_));
+ oat_header->UpdateChecksum(method_bitmap_->GetRawStorage(), method_bitmap_size_);
}
- oat_header.UpdateChecksum(&method_offsets_[0],
- sizeof(method_offsets_[0]) * method_offsets_.size());
+ oat_header->UpdateChecksum(&method_offsets_[0],
+ sizeof(method_offsets_[0]) * method_offsets_.size());
}
bool OatWriter::OatClass::Write(OatWriter* oat_writer,
- OutputStream& out,
+ OutputStream* out,
const size_t file_offset) const {
DCHECK_OFFSET_();
- if (!out.WriteFully(&status_, sizeof(status_))) {
- PLOG(ERROR) << "Failed to write class status to " << out.GetLocation();
+ if (!out->WriteFully(&status_, sizeof(status_))) {
+ PLOG(ERROR) << "Failed to write class status to " << out->GetLocation();
return false;
}
oat_writer->size_oat_class_status_ += sizeof(status_);
- if (!out.WriteFully(&type_, sizeof(type_))) {
- PLOG(ERROR) << "Failed to write oat class type to " << out.GetLocation();
+ if (!out->WriteFully(&type_, sizeof(type_))) {
+ PLOG(ERROR) << "Failed to write oat class type to " << out->GetLocation();
return false;
}
oat_writer->size_oat_class_type_ += sizeof(type_);
if (method_bitmap_size_ != 0) {
CHECK_EQ(kOatClassSomeCompiled, type_);
- if (!out.WriteFully(&method_bitmap_size_, sizeof(method_bitmap_size_))) {
- PLOG(ERROR) << "Failed to write method bitmap size to " << out.GetLocation();
+ if (!out->WriteFully(&method_bitmap_size_, sizeof(method_bitmap_size_))) {
+ PLOG(ERROR) << "Failed to write method bitmap size to " << out->GetLocation();
return false;
}
oat_writer->size_oat_class_method_bitmaps_ += sizeof(method_bitmap_size_);
- if (!out.WriteFully(method_bitmap_->GetRawStorage(), method_bitmap_size_)) {
- PLOG(ERROR) << "Failed to write method bitmap to " << out.GetLocation();
+ if (!out->WriteFully(method_bitmap_->GetRawStorage(), method_bitmap_size_)) {
+ PLOG(ERROR) << "Failed to write method bitmap to " << out->GetLocation();
return false;
}
oat_writer->size_oat_class_method_bitmaps_ += method_bitmap_size_;
}
- if (!out.WriteFully(&method_offsets_[0],
+ if (!out->WriteFully(&method_offsets_[0],
sizeof(method_offsets_[0]) * method_offsets_.size())) {
- PLOG(ERROR) << "Failed to write method offsets to " << out.GetLocation();
+ PLOG(ERROR) << "Failed to write method offsets to " << out->GetLocation();
return false;
}
oat_writer->size_oat_class_method_offsets_ += sizeof(method_offsets_[0]) * method_offsets_.size();
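The hunks above uniformly convert the OatWriter write helpers from OutputStream& to OutputStream* parameters, making mutation explicit at every call site. A minimal sketch of the resulting write-and-account idiom follows; the OutputStream stand-in is hypothetical and assumes only the WriteFully/GetLocation members the diff itself uses:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <string>
    #include <utility>

    // Stand-in stream type; only WriteFully() and GetLocation() are
    // assumed from the interface visible in the diff.
    class OutputStream {
     public:
      explicit OutputStream(std::string location) : location_(std::move(location)) {}
      bool WriteFully(const void* buffer, size_t byte_count) {
        return fwrite(buffer, 1, byte_count, stdout) == byte_count;
      }
      const std::string& GetLocation() const { return location_; }
     private:
      std::string location_;
    };

    // The converted idiom: take the stream by pointer, write one field,
    // and account for its size only after the write succeeds.
    bool WriteField(OutputStream* out, uint32_t field, size_t* relative_offset) {
      if (!out->WriteFully(&field, sizeof(field))) {
        fprintf(stderr, "Failed to write field to %s\n", out->GetLocation().c_str());
        return false;
      }
      *relative_offset += sizeof(field);
      return true;
    }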
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index 067c78971f..bab1a26d44 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -78,10 +78,23 @@ class OatWriter {
return size_;
}
- bool Write(OutputStream& out);
+ bool Write(OutputStream* out);
~OatWriter();
+ struct DebugInfo {
+ DebugInfo(const std::string& method_name, uint32_t low_pc, uint32_t high_pc)
+ : method_name_(method_name), low_pc_(low_pc), high_pc_(high_pc) {
+ }
+ std::string method_name_;
+ uint32_t low_pc_;
+ uint32_t high_pc_;
+ };
+
+ const std::vector<DebugInfo>& GetCFIMethodInfo() const {
+ return method_info_;
+ }
+
private:
size_t InitOatHeader();
size_t InitOatDexFiles(size_t offset);
@@ -105,28 +118,28 @@ class OatWriter {
bool is_native, InvokeType type, uint32_t method_idx, const DexFile&)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool WriteTables(OutputStream& out, const size_t file_offset);
- size_t WriteCode(OutputStream& out, const size_t file_offset);
- size_t WriteCodeDexFiles(OutputStream& out, const size_t file_offset, size_t relative_offset);
- size_t WriteCodeDexFile(OutputStream& out, const size_t file_offset, size_t relative_offset,
+ bool WriteTables(OutputStream* out, const size_t file_offset);
+ size_t WriteCode(OutputStream* out, const size_t file_offset);
+ size_t WriteCodeDexFiles(OutputStream* out, const size_t file_offset, size_t relative_offset);
+ size_t WriteCodeDexFile(OutputStream* out, const size_t file_offset, size_t relative_offset,
size_t* oat_class_index, const DexFile& dex_file);
- size_t WriteCodeClassDef(OutputStream& out, const size_t file_offset, size_t relative_offset,
+ size_t WriteCodeClassDef(OutputStream* out, const size_t file_offset, size_t relative_offset,
size_t oat_class_index, const DexFile& dex_file,
const DexFile::ClassDef& class_def);
- size_t WriteCodeMethod(OutputStream& out, const size_t file_offset, size_t relative_offset,
+ size_t WriteCodeMethod(OutputStream* out, const size_t file_offset, size_t relative_offset,
size_t oat_class_index, size_t class_def_method_index,
size_t* method_offsets_index, bool is_static, uint32_t method_idx,
const DexFile& dex_file);
void ReportWriteFailure(const char* what, uint32_t method_idx, const DexFile& dex_file,
- OutputStream& out) const;
+ const OutputStream& out) const;
class OatDexFile {
public:
explicit OatDexFile(size_t offset, const DexFile& dex_file);
size_t SizeOf() const;
- void UpdateChecksum(OatHeader& oat_header) const;
- bool Write(OatWriter* oat_writer, OutputStream& out, const size_t file_offset) const;
+ void UpdateChecksum(OatHeader* oat_header) const;
+ bool Write(OatWriter* oat_writer, OutputStream* out, const size_t file_offset) const;
// Offset of start of OatDexFile from beginning of OatHeader. It is
// used to validate file position when writing.
@@ -153,8 +166,8 @@ class OatWriter {
size_t GetOatMethodOffsetsOffsetFromOatHeader(size_t class_def_method_index_) const;
size_t GetOatMethodOffsetsOffsetFromOatClass(size_t class_def_method_index_) const;
size_t SizeOf() const;
- void UpdateChecksum(OatHeader& oat_header) const;
- bool Write(OatWriter* oat_writer, OutputStream& out, const size_t file_offset) const;
+ void UpdateChecksum(OatHeader* oat_header) const;
+ bool Write(OatWriter* oat_writer, OutputStream* out, const size_t file_offset) const;
CompiledMethod* GetCompiledMethod(size_t class_def_method_index) const {
DCHECK(compiled_methods_ != NULL);
@@ -205,6 +218,8 @@ class OatWriter {
DISALLOW_COPY_AND_ASSIGN(OatClass);
};
+ std::vector<DebugInfo> method_info_;
+
const CompilerDriver* const compiler_driver_;
// note OatFile does not take ownership of the DexFiles
@@ -228,6 +243,7 @@ class OatWriter {
UniquePtr<const std::vector<uint8_t> > portable_imt_conflict_trampoline_;
UniquePtr<const std::vector<uint8_t> > portable_resolution_trampoline_;
UniquePtr<const std::vector<uint8_t> > portable_to_interpreter_bridge_;
+ UniquePtr<const std::vector<uint8_t> > quick_generic_jni_trampoline_;
UniquePtr<const std::vector<uint8_t> > quick_imt_conflict_trampoline_;
UniquePtr<const std::vector<uint8_t> > quick_resolution_trampoline_;
UniquePtr<const std::vector<uint8_t> > quick_to_interpreter_bridge_;
@@ -244,6 +260,7 @@ class OatWriter {
uint32_t size_portable_imt_conflict_trampoline_;
uint32_t size_portable_resolution_trampoline_;
uint32_t size_portable_to_interpreter_bridge_;
+ uint32_t size_quick_generic_jni_trampoline_;
uint32_t size_quick_imt_conflict_trampoline_;
uint32_t size_quick_resolution_trampoline_;
uint32_t size_quick_to_interpreter_bridge_;
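The new DebugInfo records expose per-method PC ranges through GetCFIMethodInfo() for later stages, such as the GDB JIT support added to the ELF code below. A hedged illustration of how a consumer might walk those records; the loop and the text_base parameter are hypothetical, only the struct layout comes from the diff:

    #include <cstdint>
    #include <cstdio>
    #include <string>
    #include <vector>

    // Mirrors the DebugInfo struct added above.
    struct DebugInfo {
      std::string method_name_;
      uint32_t low_pc_;
      uint32_t high_pc_;
    };

    // A CFI/DWARF emitter would add the .text base address to the
    // method-relative offsets before writing DW_AT_low_pc/DW_AT_high_pc.
    void DumpMethodRanges(const std::vector<DebugInfo>& infos, uint32_t text_base) {
      for (const DebugInfo& info : infos) {
        std::printf("%s: [0x%08x, 0x%08x)\n", info.method_name_.c_str(),
                    text_base + info.low_pc_, text_base + info.high_pc_);
      }
    }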
diff --git a/compiler/utils/arena_bit_vector.cc b/compiler/utils/arena_bit_vector.cc
index 6f03524651..220ff14baa 100644
--- a/compiler/utils/arena_bit_vector.cc
+++ b/compiler/utils/arena_bit_vector.cc
@@ -42,6 +42,8 @@ class ArenaBitVectorAllocator : public Allocator {
ArenaBitVector::ArenaBitVector(ArenaAllocator* arena, unsigned int start_bits,
bool expandable, OatBitMapKind kind)
- : BitVector(start_bits, expandable, new (arena) ArenaBitVectorAllocator(arena)), kind_(kind) {}
+ : BitVector(start_bits, expandable, new (arena) ArenaBitVectorAllocator(arena)), kind_(kind) {
+ UNUSED(kind_);
+}
} // namespace art
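The UNUSED(kind_) call works around Clang's -Wunused-private-field, which fires for fields that are initialized but never read; GCC has no such warning. A small sketch of the pattern, assuming UNUSED expands to a cast-to-void (the exact ART macro may differ):

    // Assumed expansion; ART defines its own UNUSED macro.
    #define UNUSED(x) ((void)(x))

    class Tagged {
     public:
      explicit Tagged(int kind) : kind_(kind) {
        UNUSED(kind_);  // Field is kept only so it is visible in a debugger.
      }
     private:
      int kind_;
    };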
diff --git a/compiler/utils/arm/managed_register_arm.cc b/compiler/utils/arm/managed_register_arm.cc
index 57c23059de..1fdc110dcf 100644
--- a/compiler/utils/arm/managed_register_arm.cc
+++ b/compiler/utils/arm/managed_register_arm.cc
@@ -21,16 +21,6 @@
namespace art {
namespace arm {
-// We need all registers for caching of locals.
-// Register R9 .. R15 are reserved.
-static const int kNumberOfAvailableCoreRegisters = (R8 - R0) + 1;
-static const int kNumberOfAvailableSRegisters = kNumberOfSRegisters;
-static const int kNumberOfAvailableDRegisters = kNumberOfDRegisters;
-static const int kNumberOfAvailableOverlappingDRegisters =
- kNumberOfOverlappingDRegisters;
-static const int kNumberOfAvailableRegisterPairs = kNumberOfRegisterPairs;
-
-
// Returns true if this managed-register overlaps the other managed-register.
bool ArmManagedRegister::Overlaps(const ArmManagedRegister& other) const {
if (IsNoRegister() || other.IsNoRegister()) return false;
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index c9be4edbf8..296254d140 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -282,7 +282,9 @@ class AssemblerBuffer {
byte* cursor_;
byte* limit_;
AssemblerFixup* fixup_;
+#ifndef NDEBUG
bool fixups_processed_;
+#endif
// Head of linked list of slow paths
SlowPath* slow_path_;
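Guarding fixups_processed_ with #ifndef NDEBUG compiles the member out of release builds entirely, since it is only touched by debug assertions. A sketch of the pattern with illustrative names:

    #include <cassert>

    class Buffer {
     public:
      Buffer()
    #ifndef NDEBUG
          : fixups_processed_(false)
    #endif
      {
      }

      void ProcessFixups() {
    #ifndef NDEBUG
        assert(!fixups_processed_);  // Fixups must be processed only once.
        fixups_processed_ = true;
    #endif
        // ... real fixup work would go here ...
      }

     private:
    #ifndef NDEBUG
      bool fixups_processed_;
    #endif
    };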
diff --git a/compiler/utils/mips/managed_register_mips.cc b/compiler/utils/mips/managed_register_mips.cc
index 195dafb0a1..5a8c0481a5 100644
--- a/compiler/utils/mips/managed_register_mips.cc
+++ b/compiler/utils/mips/managed_register_mips.cc
@@ -21,17 +21,6 @@
namespace art {
namespace mips {
-// These core registers are never available for allocation.
-static const Register kReservedCoreRegistersArray[] = { S0, S1 };
-
-// We need all registers for caching.
-static const int kNumberOfAvailableCoreRegisters = (S7 - T0) + 1;
-static const int kNumberOfAvailableFRegisters = kNumberOfFRegisters;
-static const int kNumberOfAvailableDRegisters = kNumberOfDRegisters;
-static const int kNumberOfAvailableOverlappingDRegisters =
- kNumberOfOverlappingDRegisters;
-static const int kNumberOfAvailableRegisterPairs = kNumberOfRegisterPairs;
-
bool MipsManagedRegister::Overlaps(const MipsManagedRegister& other) const {
if (IsNoRegister() || other.IsNoRegister()) return false;
CHECK(IsValidManagedRegister());
diff --git a/compiler/utils/x86/managed_register_x86.cc b/compiler/utils/x86/managed_register_x86.cc
index 4697d06136..7fae7a8b6f 100644
--- a/compiler/utils/x86/managed_register_x86.cc
+++ b/compiler/utils/x86/managed_register_x86.cc
@@ -21,19 +21,6 @@
namespace art {
namespace x86 {
-// These cpu registers are never available for allocation.
-static const Register kReservedCpuRegistersArray[] = { ESP };
-
-
-// We reduce the number of available registers for allocation in debug-code
-// mode in order to increase register pressure.
-
-// We need all registers for caching.
-static const int kNumberOfAvailableCpuRegisters = kNumberOfCpuRegisters;
-static const int kNumberOfAvailableXmmRegisters = kNumberOfXmmRegisters;
-static const int kNumberOfAvailableRegisterPairs = kNumberOfRegisterPairs;
-
-
// Define register pairs.
// This list must be kept in sync with the RegisterPair enum.
#define REGISTER_PAIR_LIST(P) \
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index a082e36ca2..041a66b34e 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -349,9 +349,9 @@ class Dex2Oat {
dump_passes,
&compiler_phases_timings));
- driver->GetCompilerBackend()->SetBitcodeFileName(bitcode_filename);
+ driver->GetCompilerBackend()->SetBitcodeFileName(*driver.get(), bitcode_filename);
- driver->CompileAll(class_loader, dex_files, timings);
+ driver->CompileAll(class_loader, dex_files, &timings);
timings.NewSplit("dex2oat OatWriter");
std::string image_file_location;
@@ -377,7 +377,7 @@ class Dex2Oat {
&timings);
TimingLogger::ScopedSplit split("Writing ELF", &timings);
- if (!driver->WriteElf(android_root, is_host, dex_files, oat_writer, oat_file)) {
+ if (!driver->WriteElf(android_root, is_host, dex_files, &oat_writer, oat_file)) {
LOG(ERROR) << "Failed to write ELF file " << oat_file->GetPath();
return NULL;
}
@@ -760,6 +760,7 @@ static int dex2oat(int argc, char** argv) {
bool dump_passes = false;
bool dump_slow_timing = kIsDebugBuild;
bool watch_dog_enabled = !kIsTargetBuild;
+ bool generate_gdb_information = kIsDebugBuild;
for (int i = 0; i < argc; i++) {
const StringPiece option(argv[i]);
@@ -797,6 +798,10 @@ static int dex2oat(int argc, char** argv) {
watch_dog_enabled = true;
} else if (option == "--no-watch-dog") {
watch_dog_enabled = false;
+ } else if (option == "--gen-gdb-info") {
+ generate_gdb_information = true;
+ } else if (option == "--no-gen-gdb-info") {
+ generate_gdb_information = false;
} else if (option.starts_with("-j")) {
const char* thread_count_str = option.substr(strlen("-j")).data();
if (!ParseInt(thread_count_str, &thread_count)) {
@@ -1042,7 +1047,8 @@ static int dex2oat(int argc, char** argv) {
large_method_threshold,
small_method_threshold,
tiny_method_threshold,
- num_dex_methods_threshold
+ num_dex_methods_threshold,
+ generate_gdb_information
#ifdef ART_SEA_IR_MODE
, compiler_options.sea_ir_ = true;
#endif
@@ -1302,7 +1308,7 @@ static int dex2oat(int argc, char** argv) {
LOG(INFO) << Dumpable<TimingLogger>(timings);
}
if (dump_passes) {
- LOG(INFO) << Dumpable<CumulativeLogger>(compiler.get()->GetTimingsLogger());
+ LOG(INFO) << Dumpable<CumulativeLogger>(*compiler.get()->GetTimingsLogger());
}
return EXIT_SUCCESS;
}
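The new --gen-gdb-info/--no-gen-gdb-info pair follows the same idiom as --watch-dog/--no-watch-dog: a build-type-dependent default that either spelling can override. A condensed sketch of the parsing shape, with StringPiece replaced by std::string for brevity:

    #include <string>

    bool ParseGdbInfoFlag(int argc, char** argv, bool is_debug_build) {
      bool generate_gdb_information = is_debug_build;  // Default, as above.
      for (int i = 0; i < argc; i++) {
        const std::string option(argv[i]);
        if (option == "--gen-gdb-info") {
          generate_gdb_information = true;
        } else if (option == "--no-gen-gdb-info") {
          generate_gdb_information = false;
        }
      }
      return generate_gdb_information;
    }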
diff --git a/disassembler/Android.mk b/disassembler/Android.mk
index 1ce7b134e0..ca08b09372 100644
--- a/disassembler/Android.mk
+++ b/disassembler/Android.mk
@@ -90,13 +90,9 @@ define build-libart-disassembler
LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
ifeq ($$(art_target_or_host),target)
LOCAL_SHARED_LIBRARIES += libcutils
- include $(LLVM_GEN_INTRINSICS_MK)
- include $(LLVM_DEVICE_BUILD_MK)
include $(BUILD_SHARED_LIBRARY)
else # host
LOCAL_STATIC_LIBRARIES += libcutils
- include $(LLVM_GEN_INTRINSICS_MK)
- include $(LLVM_HOST_BUILD_MK)
include $(BUILD_HOST_SHARED_LIBRARY)
endif
endef
diff --git a/runtime/Android.mk b/runtime/Android.mk
index d6d2b42e77..fd9dc4c5a7 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -74,7 +74,6 @@ LIBART_COMMON_SRC_FILES := \
intern_table.cc \
interpreter/interpreter.cc \
interpreter/interpreter_common.cc \
- interpreter/interpreter_goto_table_impl.cc \
interpreter/interpreter_switch_impl.cc \
jdwp/jdwp_event.cc \
jdwp/jdwp_expand_buf.cc \
@@ -125,6 +124,7 @@ LIBART_COMMON_SRC_FILES := \
oat_file.cc \
offsets.cc \
os_linux.cc \
+ parsed_options.cc \
primitive.cc \
reference_table.cc \
reflection.cc \
@@ -154,7 +154,6 @@ LIBART_COMMON_SRC_FILES += \
arch/arm/registers_arm.cc \
arch/x86/registers_x86.cc \
arch/mips/registers_mips.cc \
- arch/quick_alloc_entrypoints.cc \
entrypoints/entrypoint_utils.cc \
entrypoints/interpreter/interpreter_entrypoints.cc \
entrypoints/jni/jni_entrypoints.cc \
@@ -185,6 +184,10 @@ LIBART_COMMON_SRC_FILES += \
entrypoints/quick/quick_throw_entrypoints.cc \
entrypoints/quick/quick_trampoline_entrypoints.cc
+# Source files that only compile with GCC.
+LIBART_GCC_ONLY_SRC_FILES := \
+ interpreter/interpreter_goto_table_impl.cc
+
LIBART_LDFLAGS := -Wl,--no-fatal-warnings
LIBART_TARGET_SRC_FILES := \
@@ -300,6 +303,7 @@ endif
# $(1): target or host
# $(2): ndebug or debug
+# $(3): true or false for LOCAL_CLANG
define build-libart
ifneq ($(1),target)
ifneq ($(1),host)
@@ -311,9 +315,15 @@ define build-libart
$$(error expected ndebug or debug for argument 2, received $(2))
endif
endif
+ ifneq ($(3),true)
+ ifneq ($(3),false)
+ $$(error expected true or false for argument 3, received $(3))
+ endif
+ endif
art_target_or_host := $(1)
art_ndebug_or_debug := $(2)
+ art_clang := $(3)
include $(CLEAR_VARS)
ifeq ($$(art_target_or_host),target)
@@ -354,11 +364,14 @@ $$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PAT
$(foreach arch,$(ART_SUPPORTED_ARCH),
LOCAL_LDFLAGS_$(arch) := $$(LIBART_TARGET_LDFLAGS_$(arch)))
+ ifeq ($$(art_clang),false)
+ LOCAL_SRC_FILES += $(LIBART_GCC_ONLY_SRC_FILES)
+ else
+ LOCAL_CLANG := true
+ endif
ifeq ($$(art_target_or_host),target)
- LOCAL_CLANG := $(ART_TARGET_CLANG)
LOCAL_CFLAGS += $(ART_TARGET_CFLAGS)
else # host
- LOCAL_CLANG := $(ART_HOST_CLANG)
LOCAL_CFLAGS += $(ART_HOST_CFLAGS)
endif
ifeq ($$(art_ndebug_or_debug),debug)
@@ -389,7 +402,14 @@ $$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PAT
LOCAL_LDLIBS += -lrt
endif
endif
- include $(LLVM_GEN_INTRINSICS_MK)
+ ifeq ($(ART_USE_PORTABLE_COMPILER),true)
+ include $(LLVM_GEN_INTRINSICS_MK)
+ ifeq ($$(art_target_or_host),target)
+ include $(LLVM_DEVICE_BUILD_MK)
+ else # host
+ include $(LLVM_HOST_BUILD_MK)
+ endif
+ endif
LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common.mk
LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
@@ -398,27 +418,25 @@ $$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PAT
endif
ifeq ($$(art_target_or_host),target)
- include $(LLVM_DEVICE_BUILD_MK)
include $(BUILD_SHARED_LIBRARY)
else # host
- include $(LLVM_HOST_BUILD_MK)
include $(BUILD_HOST_SHARED_LIBRARY)
endif
endef
ifeq ($(ART_BUILD_TARGET_NDEBUG),true)
- $(eval $(call build-libart,target,ndebug))
+ $(eval $(call build-libart,target,ndebug,$(ART_TARGET_CLANG)))
endif
ifeq ($(ART_BUILD_TARGET_DEBUG),true)
- $(eval $(call build-libart,target,debug))
+ $(eval $(call build-libart,target,debug,$(ART_TARGET_CLANG)))
endif
# We always build dex2oat and dependencies, even if the host build is otherwise disabled, since they are used to cross compile for the target.
ifeq ($(WITH_HOST_DALVIK),true)
ifeq ($(ART_BUILD_NDEBUG),true)
- $(eval $(call build-libart,host,ndebug))
+ $(eval $(call build-libart,host,ndebug,$(ART_HOST_CLANG)))
endif
ifeq ($(ART_BUILD_DEBUG),true)
- $(eval $(call build-libart,host,debug))
+ $(eval $(call build-libart,host,debug,$(ART_HOST_CLANG)))
endif
endif
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index 5166d29096..23e3433116 100644
--- a/runtime/arch/arm/entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -16,6 +16,7 @@
#include "entrypoints/interpreter/interpreter_entrypoints.h"
#include "entrypoints/portable/portable_entrypoints.h"
+#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/entrypoint_utils.h"
#include "entrypoints/math_entrypoints.h"
@@ -127,7 +128,8 @@ extern "C" void art_quick_throw_no_such_method(int32_t method_idx);
extern "C" void art_quick_throw_null_pointer_exception();
extern "C" void art_quick_throw_stack_overflow(void*);
-extern void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints);
+// Generic JNI downcall
+extern "C" void art_quick_generic_jni_trampoline(mirror::ArtMethod*);
void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
PortableEntryPoints* ppoints, QuickEntryPoints* qpoints) {
@@ -182,6 +184,7 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized;
qpoints->pJniMethodEndWithReference = JniMethodEndWithReference;
qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized;
+ qpoints->pQuickGenericJniTrampoline = art_quick_generic_jni_trampoline;
// Locks
qpoints->pLockObject = art_quick_lock_object;
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index ed8bc13fc5..71dcd7f7a6 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -939,6 +939,8 @@ ENTRY art_quick_resolution_trampoline
DELIVER_PENDING_EXCEPTION
END art_quick_resolution_trampoline
+UNIMPLEMENTED art_quick_generic_jni_trampoline
+
.extern artQuickToInterpreterBridge
ENTRY art_quick_to_interpreter_bridge
SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index e1b441ac9d..500a2ebaaf 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -15,6 +15,7 @@
*/
#include "entrypoints/portable/portable_entrypoints.h"
+#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/entrypoint_utils.h"
#include "entrypoints/math_entrypoints.h"
@@ -128,6 +129,9 @@ extern "C" void art_quick_throw_no_such_method(int32_t method_idx);
extern "C" void art_quick_throw_null_pointer_exception();
extern "C" void art_quick_throw_stack_overflow(void*);
+// Generic JNI downcall
+extern "C" void art_quick_generic_jni_trampoline(mirror::ArtMethod*);
+
extern void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints);
void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
@@ -183,6 +187,7 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized;
qpoints->pJniMethodEndWithReference = JniMethodEndWithReference;
qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized;
+ qpoints->pQuickGenericJniTrampoline = art_quick_generic_jni_trampoline;
// Locks
qpoints->pLockObject = art_quick_lock_object;
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index d23be47804..c3ae5630d4 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -1007,6 +1007,8 @@ ENTRY art_quick_resolution_trampoline
DELIVER_PENDING_EXCEPTION
END art_quick_resolution_trampoline
+UNIMPLEMENTED art_quick_generic_jni_trampoline
+
.extern artQuickToInterpreterBridge
ENTRY art_quick_to_interpreter_bridge
GENERATE_GLOBAL_POINTER
diff --git a/runtime/arch/quick_alloc_entrypoints.cc b/runtime/arch/quick_alloc_entrypoints.cc
deleted file mode 100644
index 9363f81cfe..0000000000
--- a/runtime/arch/quick_alloc_entrypoints.cc
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "entrypoints/quick/quick_entrypoints.h"
-#include "gc/heap.h"
-
-#define GENERATE_ENTRYPOINTS(suffix) \
-extern "C" void* art_quick_alloc_array##suffix(uint32_t, void*, int32_t); \
-extern "C" void* art_quick_alloc_array_resolved##suffix(void* klass, void*, int32_t); \
-extern "C" void* art_quick_alloc_array_with_access_check##suffix(uint32_t, void*, int32_t); \
-extern "C" void* art_quick_alloc_object##suffix(uint32_t type_idx, void* method); \
-extern "C" void* art_quick_alloc_object_resolved##suffix(void* klass, void* method); \
-extern "C" void* art_quick_alloc_object_initialized##suffix(void* klass, void* method); \
-extern "C" void* art_quick_alloc_object_with_access_check##suffix(uint32_t type_idx, void* method); \
-extern "C" void* art_quick_check_and_alloc_array##suffix(uint32_t, void*, int32_t); \
-extern "C" void* art_quick_check_and_alloc_array_with_access_check##suffix(uint32_t, void*, int32_t); \
-extern "C" void* art_quick_alloc_array##suffix##_instrumented(uint32_t, void*, int32_t); \
-extern "C" void* art_quick_alloc_array_resolved##suffix##_instrumented(void* klass, void*, int32_t); \
-extern "C" void* art_quick_alloc_array_with_access_check##suffix##_instrumented(uint32_t, void*, int32_t); \
-extern "C" void* art_quick_alloc_object##suffix##_instrumented(uint32_t type_idx, void* method); \
-extern "C" void* art_quick_alloc_object_resolved##suffix##_instrumented(void* klass, void* method); \
-extern "C" void* art_quick_alloc_object_initialized##suffix##_instrumented(void* klass, void* method); \
-extern "C" void* art_quick_alloc_object_with_access_check##suffix##_instrumented(uint32_t type_idx, void* method); \
-extern "C" void* art_quick_check_and_alloc_array##suffix##_instrumented(uint32_t, void*, int32_t); \
-extern "C" void* art_quick_check_and_alloc_array_with_access_check##suffix##_instrumented(uint32_t, void*, int32_t); \
-void SetQuickAllocEntryPoints##suffix(QuickEntryPoints* qpoints, bool instrumented) { \
- if (instrumented) { \
- qpoints->pAllocArray = art_quick_alloc_array##suffix##_instrumented; \
- qpoints->pAllocArrayResolved = art_quick_alloc_array_resolved##suffix##_instrumented; \
- qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check##suffix##_instrumented; \
- qpoints->pAllocObject = art_quick_alloc_object##suffix##_instrumented; \
- qpoints->pAllocObjectResolved = art_quick_alloc_object_resolved##suffix##_instrumented; \
- qpoints->pAllocObjectInitialized = art_quick_alloc_object_initialized##suffix##_instrumented; \
- qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check##suffix##_instrumented; \
- qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array##suffix##_instrumented; \
- qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check##suffix##_instrumented; \
- } else { \
- qpoints->pAllocArray = art_quick_alloc_array##suffix; \
- qpoints->pAllocArrayResolved = art_quick_alloc_array_resolved##suffix; \
- qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check##suffix; \
- qpoints->pAllocObject = art_quick_alloc_object##suffix; \
- qpoints->pAllocObjectResolved = art_quick_alloc_object_resolved##suffix; \
- qpoints->pAllocObjectInitialized = art_quick_alloc_object_initialized##suffix; \
- qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check##suffix; \
- qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array##suffix; \
- qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check##suffix; \
- } \
-}
-
-namespace art {
-
-// Generate the entrypoint functions.
-GENERATE_ENTRYPOINTS(_dlmalloc);
-GENERATE_ENTRYPOINTS(_rosalloc);
-GENERATE_ENTRYPOINTS(_bump_pointer);
-GENERATE_ENTRYPOINTS(_tlab);
-
-static bool entry_points_instrumented = false;
-static gc::AllocatorType entry_points_allocator = gc::kAllocatorTypeDlMalloc;
-
-void SetQuickAllocEntryPointsAllocator(gc::AllocatorType allocator) {
- entry_points_allocator = allocator;
-}
-
-void SetQuickAllocEntryPointsInstrumented(bool instrumented) {
- entry_points_instrumented = instrumented;
-}
-
-void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints) {
- switch (entry_points_allocator) {
- case gc::kAllocatorTypeDlMalloc: {
- SetQuickAllocEntryPoints_dlmalloc(qpoints, entry_points_instrumented);
- break;
- }
- case gc::kAllocatorTypeRosAlloc: {
- SetQuickAllocEntryPoints_rosalloc(qpoints, entry_points_instrumented);
- break;
- }
- case gc::kAllocatorTypeBumpPointer: {
- CHECK(kMovingCollector);
- SetQuickAllocEntryPoints_bump_pointer(qpoints, entry_points_instrumented);
- break;
- }
- case gc::kAllocatorTypeTLAB: {
- CHECK(kMovingCollector);
- SetQuickAllocEntryPoints_tlab(qpoints, entry_points_instrumented);
- break;
- }
- default: {
- LOG(FATAL) << "Unimplemented";
- }
- }
-}
-
-} // namespace art
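The file deletion above is a move, not a removal: the allocator-selection switch reappears with the other quick alloc entrypoints under runtime/entrypoints/quick/ (see the quick_alloc_entrypoints.cc hunk later in this diff). A condensed sketch of the dispatch shape, with the setters stubbed where the real code generates one per allocator via GENERATE_ENTRYPOINTS:

    struct QuickEntryPoints { /* alloc entrypoint pointers elided */ };

    enum AllocatorType { kDlMalloc, kRosAlloc, kBumpPointer, kTLAB };

    static void SetQuickAllocEntryPoints_dlmalloc(QuickEntryPoints*, bool) {}
    static void SetQuickAllocEntryPoints_rosalloc(QuickEntryPoints*, bool) {}

    static AllocatorType entry_points_allocator = kDlMalloc;
    static bool entry_points_instrumented = false;

    void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints) {
      switch (entry_points_allocator) {
        case kDlMalloc:
          SetQuickAllocEntryPoints_dlmalloc(qpoints, entry_points_instrumented);
          break;
        case kRosAlloc:
          SetQuickAllocEntryPoints_rosalloc(qpoints, entry_points_instrumented);
          break;
        default:
          break;  // Bump-pointer and TLAB cases elided.
      }
    }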
diff --git a/runtime/arch/x86/asm_support_x86.S b/runtime/arch/x86/asm_support_x86.S
index 9ec199518b..267717a28f 100644
--- a/runtime/arch/x86/asm_support_x86.S
+++ b/runtime/arch/x86/asm_support_x86.S
@@ -19,38 +19,25 @@
#include "asm_support_x86.h"
-#if defined(__APPLE__)
- // Mac OS' as(1) doesn't let you name macro parameters.
+#if defined(__clang__)
+ // Clang's as(1) doesn't let you name macro parameters.
#define MACRO0(macro_name) .macro macro_name
#define MACRO1(macro_name, macro_arg1) .macro macro_name
#define MACRO2(macro_name, macro_arg1, macro_args2) .macro macro_name
#define MACRO3(macro_name, macro_arg1, macro_args2, macro_args3) .macro macro_name
#define END_MACRO .endmacro
- // Mac OS' as(1) uses $0, $1, and so on for macro arguments, and function names
- // are mangled with an extra underscore prefix. The use of $x for arguments
- // mean that literals need to be represented with $$x in macros.
- #define SYMBOL(name) _ ## name
- #define PLT_SYMBOL(name) _ ## name
+ // Clang's as(1) uses $0, $1, and so on for macro arguments.
#define VAR(name,index) SYMBOL($index)
#define PLT_VAR(name, index) SYMBOL($index)
#define REG_VAR(name,index) %$index
#define CALL_MACRO(name,index) $index
+ #define FUNCTION_TYPE(name,index) .type $index, @function
+ #define SIZE(name,index) .size $index, .-$index
+
+  // The use of $x for arguments means that literals need to be represented with $$x in macros.
#define LITERAL(value) $value
#define MACRO_LITERAL(value) $$value
-
- // Mac OS' doesn't like cfi_* directives
- #define CFI_STARTPROC
- #define CFI_ENDPROC
- #define CFI_ADJUST_CFA_OFFSET(size)
- #define CFI_DEF_CFA(reg,size)
- #define CFI_DEF_CFA_REGISTER(reg)
- #define CFI_RESTORE(reg)
- #define CFI_REL_OFFSET(reg,size)
-
- // Mac OS' doesn't support certain directives
- #define FUNCTION_TYPE(name)
- #define SIZE(name)
#else
// Regular gas(1) lets you name macro parameters.
#define MACRO0(macro_name) .macro macro_name
@@ -65,16 +52,19 @@
// no special meaning to $, so literals are still just $x. The use of altmacro means % is a
// special character meaning care needs to be taken when passing registers as macro arguments.
.altmacro
- #define SYMBOL(name) name
- #define PLT_SYMBOL(name) name@PLT
#define VAR(name,index) name&
#define PLT_VAR(name, index) name&@PLT
#define REG_VAR(name,index) %name
#define CALL_MACRO(name,index) name&
+ #define FUNCTION_TYPE(name,index) .type name&, @function
+ #define SIZE(name,index) .size name, .-name
+
#define LITERAL(value) $value
#define MACRO_LITERAL(value) $value
+#endif
- // CFI support
+ // CFI support.
+#if !defined(__APPLE__)
#define CFI_STARTPROC .cfi_startproc
#define CFI_ENDPROC .cfi_endproc
#define CFI_ADJUST_CFA_OFFSET(size) .cfi_adjust_cfa_offset size
@@ -82,9 +72,25 @@
#define CFI_DEF_CFA_REGISTER(reg) .cfi_def_cfa_register reg
#define CFI_RESTORE(reg) .cfi_restore reg
#define CFI_REL_OFFSET(reg,size) .cfi_rel_offset reg,size
+#else
+  // Mac OS doesn't like cfi_* directives.
+ #define CFI_STARTPROC
+ #define CFI_ENDPROC
+ #define CFI_ADJUST_CFA_OFFSET(size)
+ #define CFI_DEF_CFA(reg,size)
+ #define CFI_DEF_CFA_REGISTER(reg)
+ #define CFI_RESTORE(reg)
+ #define CFI_REL_OFFSET(reg,size)
+#endif
- #define FUNCTION_TYPE(name) .type name&, @function
- #define SIZE(name) .size name, .-name
+ // Symbols.
+#if !defined(__APPLE__)
+ #define SYMBOL(name) name
+ #define PLT_SYMBOL(name) name
+#else
+ // Mac OS' symbols have an _ prefix.
+ #define SYMBOL(name) _ ## name
+ #define PLT_SYMBOL(name) _ ## name
#endif
/* Cache alignment for function entry */
@@ -93,7 +99,7 @@ MACRO0(ALIGN_FUNCTION_ENTRY)
END_MACRO
MACRO1(DEFINE_FUNCTION, c_name)
- FUNCTION_TYPE(\c_name)
+ FUNCTION_TYPE(\c_name, 0)
.globl VAR(c_name, 0)
ALIGN_FUNCTION_ENTRY
VAR(c_name, 0):
@@ -102,7 +108,7 @@ END_MACRO
MACRO1(END_FUNCTION, c_name)
CFI_ENDPROC
- SIZE(\c_name)
+ SIZE(\c_name, 0)
END_MACRO
MACRO1(PUSH, reg)
@@ -118,7 +124,7 @@ MACRO1(POP, reg)
END_MACRO
MACRO1(UNIMPLEMENTED,name)
- FUNCTION_TYPE(\name)
+ FUNCTION_TYPE(\name, 0)
.globl VAR(name, 0)
ALIGN_FUNCTION_ENTRY
VAR(name, 0):
@@ -126,7 +132,7 @@ VAR(name, 0):
int3
int3
CFI_ENDPROC
- SIZE(\name)
+ SIZE(\name, 0)
END_MACRO
MACRO0(SETUP_GOT_NOSAVE)
diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc
index 888310ac51..c4a7b1ba2e 100644
--- a/runtime/arch/x86/entrypoints_init_x86.cc
+++ b/runtime/arch/x86/entrypoints_init_x86.cc
@@ -15,6 +15,7 @@
*/
#include "entrypoints/portable/portable_entrypoints.h"
+#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/entrypoint_utils.h"
@@ -109,6 +110,9 @@ extern "C" void art_quick_throw_no_such_method(int32_t method_idx);
extern "C" void art_quick_throw_null_pointer_exception();
extern "C" void art_quick_throw_stack_overflow(void*);
+// Generic JNI downcall
+extern "C" void art_quick_generic_jni_trampoline(mirror::ArtMethod*);
+
extern void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints);
void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
@@ -164,6 +168,7 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized;
qpoints->pJniMethodEndWithReference = JniMethodEndWithReference;
qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized;
+ qpoints->pQuickGenericJniTrampoline = art_quick_generic_jni_trampoline;
// Locks
qpoints->pLockObject = art_quick_lock_object;
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 8683a56855..b24bfd586b 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1170,6 +1170,11 @@ DEFINE_FUNCTION art_quick_resolution_trampoline
DELIVER_PENDING_EXCEPTION
END_FUNCTION art_quick_resolution_trampoline
+DEFINE_FUNCTION art_quick_generic_jni_trampoline
+ int3
+ int3
+END_FUNCTION art_quick_generic_jni_trampoline
+
DEFINE_FUNCTION art_quick_to_interpreter_bridge
SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME // save frame
mov %esp, %edx // remember SP
diff --git a/runtime/arch/x86_64/entrypoints_init_x86_64.cc b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
index 589c7d9dc9..30067cf268 100644
--- a/runtime/arch/x86_64/entrypoints_init_x86_64.cc
+++ b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
@@ -15,6 +15,7 @@
*/
#include "entrypoints/portable/portable_entrypoints.h"
+#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/entrypoint_utils.h"
@@ -110,6 +111,9 @@ extern "C" void art_quick_throw_no_such_method(int32_t method_idx);
extern "C" void art_quick_throw_null_pointer_exception();
extern "C" void art_quick_throw_stack_overflow(void*);
+// Generic JNI entrypoint
+extern "C" void art_quick_generic_jni_trampoline(mirror::ArtMethod*);
+
extern void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints);
void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
@@ -165,6 +169,7 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized;
qpoints->pJniMethodEndWithReference = JniMethodEndWithReference;
qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized;
+ qpoints->pQuickGenericJniTrampoline = art_quick_generic_jni_trampoline;
// Locks
qpoints->pLockObject = art_quick_lock_object;
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index ac238f0d50..32e8434a6c 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -635,6 +635,12 @@ UNIMPLEMENTED art_quick_proxy_invoke_handler
UNIMPLEMENTED art_quick_imt_conflict_trampoline
UNIMPLEMENTED art_quick_resolution_trampoline
+
+ /*
+ * Called to do a generic JNI down-call
+ */
+UNIMPLEMENTED art_quick_generic_jni_trampoline
+
/*
* Called to bridge from the quick to interpreter ABI. On entry the arguments match those
* of a quick call:
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 3da7409678..754d1dd8c2 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -46,6 +46,7 @@ inline mirror::Class* ClassLinker::FindArrayClass(Thread* self, mirror::Class* e
return array_class;
}
}
+ DCHECK(!element_class->IsPrimitiveVoid());
std::string descriptor("[");
descriptor += ClassHelper(element_class).GetDescriptor();
SirtRef<mirror::ClassLoader> class_loader(self, element_class->GetClassLoader());
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 87323f91e7..6550532f3b 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -184,7 +184,8 @@ ClassLinker::ClassLinker(InternTable* intern_table)
portable_resolution_trampoline_(nullptr),
quick_resolution_trampoline_(nullptr),
portable_imt_conflict_trampoline_(nullptr),
- quick_imt_conflict_trampoline_(nullptr) {
+ quick_imt_conflict_trampoline_(nullptr),
+ quick_generic_jni_trampoline_(nullptr) {
CHECK_EQ(arraysize(class_roots_descriptors_), size_t(kClassRootsMax));
memset(find_array_class_cache_, 0, kFindArrayCacheSize * sizeof(mirror::Class*));
}
@@ -987,6 +988,7 @@ void ClassLinker::InitFromImage() {
quick_resolution_trampoline_ = oat_file.GetOatHeader().GetQuickResolutionTrampoline();
portable_imt_conflict_trampoline_ = oat_file.GetOatHeader().GetPortableImtConflictTrampoline();
quick_imt_conflict_trampoline_ = oat_file.GetOatHeader().GetQuickImtConflictTrampoline();
+ quick_generic_jni_trampoline_ = oat_file.GetOatHeader().GetQuickGenericJniTrampoline();
mirror::Object* dex_caches_object = space->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
mirror::ObjectArray<mirror::DexCache>* dex_caches =
dex_caches_object->AsObjectArray<mirror::DexCache>();
@@ -1623,7 +1625,8 @@ static bool NeedsInterpreter(mirror::ArtMethod* method, const void* quick_code,
const void* portable_code) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if ((quick_code == nullptr) && (portable_code == nullptr)) {
// No code: need interpreter.
- DCHECK(!method->IsNative());
+    // May return true for native code, in the case of generic JNI.
+ // DCHECK(!method->IsNative());
return true;
}
#ifdef ART_SEA_IR_MODE
@@ -1678,8 +1681,14 @@ void ClassLinker::FixupStaticTrampolines(mirror::Class* klass) {
bool have_portable_code = false;
if (enter_interpreter) {
// Use interpreter entry point.
+
+    // Check whether the method is native; if so, it uses the generic JNI trampoline.
portable_code = GetPortableToInterpreterBridge();
- quick_code = GetQuickToInterpreterBridge();
+ if (quick_code == nullptr && portable_code == nullptr && method->IsNative()) {
+ quick_code = GetQuickGenericJniTrampoline();
+ } else {
+ quick_code = GetQuickToInterpreterBridge();
+ }
} else {
if (portable_code == nullptr) {
portable_code = GetPortableToQuickBridge();
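This hunk is the behavioral core of the generic JNI change: a native method with no compiled code now falls back to the generic JNI trampoline rather than the interpreter bridge. An illustration of the selection order (not ART's exact code; the helpers are stubbed):

    static const void* GetQuickToInterpreterBridge() { return nullptr; }
    static const void* GetQuickGenericJniTrampoline() { return nullptr; }

    const void* SelectQuickCode(bool enter_interpreter, bool is_native,
                                const void* quick_code) {
      if (!enter_interpreter) {
        return quick_code;
      }
      if (quick_code == nullptr && is_native) {
        return GetQuickGenericJniTrampoline();  // Generic JNI downcall.
      }
      return GetQuickToInterpreterBridge();
    }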
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 88dbb9c44e..e31a6cdcf0 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -351,6 +351,10 @@ class ClassLinker {
return portable_resolution_trampoline_;
}
+ const void* GetQuickGenericJniTrampoline() const {
+ return quick_generic_jni_trampoline_;
+ }
+
const void* GetQuickResolutionTrampoline() const {
return quick_resolution_trampoline_;
}
@@ -643,6 +647,7 @@ class ClassLinker {
const void* quick_resolution_trampoline_;
const void* portable_imt_conflict_trampoline_;
const void* quick_imt_conflict_trampoline_;
+ const void* quick_generic_jni_trampoline_;
friend class ImageWriter; // for GetClassRoots
FRIEND_TEST(ClassLinkerTest, ClassRootDescriptors);
diff --git a/runtime/class_reference.h b/runtime/class_reference.h
index 77c296facd..7b206126d6 100644
--- a/runtime/class_reference.h
+++ b/runtime/class_reference.h
@@ -18,6 +18,7 @@
#define ART_RUNTIME_CLASS_REFERENCE_H_
#include <stdint.h>
+#include <utility>
namespace art {
diff --git a/runtime/compiler_callbacks.h b/runtime/compiler_callbacks.h
index 9045f3af71..7233d8ee7f 100644
--- a/runtime/compiler_callbacks.h
+++ b/runtime/compiler_callbacks.h
@@ -18,6 +18,7 @@
#define ART_RUNTIME_COMPILER_CALLBACKS_H_
#include "class_reference.h"
+#include "locks.h"
namespace art {
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index 3a17e41487..fac6f53e74 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -22,6 +22,83 @@
namespace art {
+// -------------------------------------------------------------------
+// Binary GDB JIT Interface as described in
+// http://sourceware.org/gdb/onlinedocs/gdb/Declarations.html
+extern "C" {
+ typedef enum {
+ JIT_NOACTION = 0,
+ JIT_REGISTER_FN,
+ JIT_UNREGISTER_FN
+ } JITAction;
+
+ struct JITCodeEntry {
+ JITCodeEntry* next_;
+ JITCodeEntry* prev_;
+ const byte *symfile_addr_;
+ uint64_t symfile_size_;
+ };
+
+ struct JITDescriptor {
+ uint32_t version_;
+ uint32_t action_flag_;
+ JITCodeEntry* relevant_entry_;
+ JITCodeEntry* first_entry_;
+ };
+
+  // GDB will place a breakpoint in this function.
+  // To prevent GCC from inlining or removing it, we add the noinline
+  // attribute and an inline assembler statement inside.
+ void __attribute__((noinline)) __jit_debug_register_code() {
+ __asm__("");
+ }
+
+  // GDB will inspect the contents of this descriptor.
+  // Static initialization is necessary to prevent GDB from seeing an
+  // uninitialized descriptor.
+ JITDescriptor __jit_debug_descriptor = { 1, JIT_NOACTION, nullptr, nullptr };
+}
+
+
+static JITCodeEntry* CreateCodeEntry(const byte *symfile_addr,
+ uintptr_t symfile_size) {
+ JITCodeEntry* entry = new JITCodeEntry;
+ entry->symfile_addr_ = symfile_addr;
+ entry->symfile_size_ = symfile_size;
+ entry->prev_ = nullptr;
+
+ // TODO: Do we need a lock here?
+ entry->next_ = __jit_debug_descriptor.first_entry_;
+ if (entry->next_ != nullptr) {
+ entry->next_->prev_ = entry;
+ }
+ __jit_debug_descriptor.first_entry_ = entry;
+ __jit_debug_descriptor.relevant_entry_ = entry;
+
+ __jit_debug_descriptor.action_flag_ = JIT_REGISTER_FN;
+ __jit_debug_register_code();
+ return entry;
+}
+
+
+static void UnregisterCodeEntry(JITCodeEntry* entry) {
+ // TODO: Do we need a lock here?
+ if (entry->prev_ != nullptr) {
+ entry->prev_->next_ = entry->next_;
+ } else {
+ __jit_debug_descriptor.first_entry_ = entry->next_;
+ }
+
+ if (entry->next_ != nullptr) {
+ entry->next_->prev_ = entry->prev_;
+ }
+
+ __jit_debug_descriptor.relevant_entry_ = entry;
+ __jit_debug_descriptor.action_flag_ = JIT_UNREGISTER_FN;
+ __jit_debug_register_code();
+ delete entry;
+}
+
ElfFile::ElfFile(File* file, bool writable, bool program_header_only)
: file_(file),
writable_(writable),
@@ -38,7 +115,9 @@ ElfFile::ElfFile(File* file, bool writable, bool program_header_only)
dynstr_section_start_(NULL),
hash_section_start_(NULL),
symtab_symbol_table_(NULL),
- dynsym_symbol_table_(NULL) {
+ dynsym_symbol_table_(NULL),
+ jit_elf_image_(NULL),
+ jit_gdb_entry_(NULL) {
CHECK(file != NULL);
}
@@ -172,6 +251,10 @@ ElfFile::~ElfFile() {
STLDeleteElements(&segments_);
delete symtab_symbol_table_;
delete dynsym_symbol_table_;
+ delete jit_elf_image_;
+ if (jit_gdb_entry_) {
+ UnregisterCodeEntry(jit_gdb_entry_);
+ }
}
bool ElfFile::SetMap(MemMap* map, std::string* error_msg) {
@@ -830,6 +913,11 @@ bool ElfFile::Load(bool executable, std::string* error_msg) {
}
}
+ // Use GDB JIT support to do stack backtrace, etc.
+ if (executable) {
+ GdbJITSupport();
+ }
+
return true;
}
@@ -843,4 +931,272 @@ bool ElfFile::ValidPointer(const byte* start) const {
return false;
}
+static bool check_section_name(ElfFile& file, int section_num, const char *name) {
+ Elf32_Shdr& section_header = file.GetSectionHeader(section_num);
+ const char *section_name = file.GetString(SHT_SYMTAB, section_header.sh_name);
+ return strcmp(name, section_name) == 0;
+}
+
+static void IncrementUint32(byte *p, uint32_t increment) {
+ uint32_t *u = reinterpret_cast<uint32_t *>(p);
+ *u += increment;
+}
+
+static void RoundAndClear(byte *image, uint32_t& offset, int pwr2) {
+ uint32_t mask = pwr2 - 1;
+ while (offset & mask) {
+ image[offset++] = 0;
+ }
+}
+
+// Simple macro to advance a section header pointer to the next entry.
+#define BUMP_SHENT(sp) \
+ sp = reinterpret_cast<Elf32_Shdr *> (\
+ reinterpret_cast<byte*>(sp) + elf_hdr.e_shentsize);\
+ offset += elf_hdr.e_shentsize
+
+void ElfFile::GdbJITSupport() {
+  // We only get here if we are mapping just the program header.
+ DCHECK(program_header_only_);
+
+ // Well, we need the whole file to do this.
+ std::string error_msg;
+ UniquePtr<ElfFile> ptr(Open(const_cast<File*>(file_), false, false, &error_msg));
+ ElfFile& all = *ptr;
+
+  // Do we have the sections we are interested in, i.e. is this an
+  // OAT file that carries debug information?
+ if (all.GetSectionHeaderNum() != kExpectedSectionsInOATFile) {
+ return;
+ }
+ if (!check_section_name(all, 8, ".debug_info") ||
+ !check_section_name(all, 9, ".debug_abbrev") ||
+ !check_section_name(all, 10, ".debug_frame") ||
+ !check_section_name(all, 11, ".debug_str")) {
+ return;
+ }
+
+ // This is not needed if we have no .text segment.
+ uint32_t text_start_addr = 0;
+ for (uint32_t i = 0; i < segments_.size(); i++) {
+ if (segments_[i]->GetProtect() & PROT_EXEC) {
+ // We found the .text section.
+ text_start_addr = PointerToLowMemUInt32(segments_[i]->Begin());
+ break;
+ }
+ }
+ if (text_start_addr == 0U) {
+ return;
+ }
+
+ // Okay, we are good enough. Fake up an ELF image and tell GDB about it.
+ // We need some extra space for the debug and string sections, the ELF header, and the
+ // section header.
+ uint32_t needed_size = KB;
+
+ for (Elf32_Word i = 1; i < all.GetSectionHeaderNum(); i++) {
+ Elf32_Shdr& section_header = all.GetSectionHeader(i);
+ if (section_header.sh_addr == 0 && section_header.sh_type != SHT_DYNSYM) {
+ // Debug section: we need it.
+ needed_size += section_header.sh_size;
+ } else if (section_header.sh_type == SHT_STRTAB &&
+ strcmp(".shstrtab",
+ all.GetString(SHT_SYMTAB, section_header.sh_name)) == 0) {
+ // We also need the shared string table.
+ needed_size += section_header.sh_size;
+
+ // We also need the extra strings .symtab\0.strtab\0
+ needed_size += 16;
+ }
+ }
+
+ // Start creating our image.
+ jit_elf_image_ = new byte[needed_size];
+
+  // Create the ELF header by copying the old one.
+ Elf32_Ehdr& elf_hdr =
+ *reinterpret_cast<Elf32_Ehdr*>(jit_elf_image_);
+
+ elf_hdr = all.GetHeader();
+ elf_hdr.e_entry = 0;
+ elf_hdr.e_phoff = 0;
+ elf_hdr.e_phnum = 0;
+ elf_hdr.e_phentsize = 0;
+ elf_hdr.e_type = ET_EXEC;
+
+ uint32_t offset = sizeof(Elf32_Ehdr);
+
+ // Copy the debug sections and string table.
+ uint32_t debug_offsets[kExpectedSectionsInOATFile];
+ memset(debug_offsets, '\0', sizeof debug_offsets);
+ Elf32_Shdr *text_header = nullptr;
+ int extra_shstrtab_entries = -1;
+ int text_section_index = -1;
+ int section_index = 1;
+ for (Elf32_Word i = 1; i < kExpectedSectionsInOATFile; i++) {
+ Elf32_Shdr& section_header = all.GetSectionHeader(i);
+ // Round up to multiple of 4, ensuring zero fill.
+ RoundAndClear(jit_elf_image_, offset, 4);
+ if (section_header.sh_addr == 0 && section_header.sh_type != SHT_DYNSYM) {
+ // Debug section: we need it. Unfortunately, it wasn't mapped in.
+ debug_offsets[i] = offset;
+ // Read it from the file.
+ lseek(file_->Fd(), section_header.sh_offset, SEEK_SET);
+ read(file_->Fd(), jit_elf_image_ + offset, section_header.sh_size);
+ offset += section_header.sh_size;
+ section_index++;
+ offset += 16;
+ } else if (section_header.sh_type == SHT_STRTAB &&
+ strcmp(".shstrtab",
+ all.GetString(SHT_SYMTAB, section_header.sh_name)) == 0) {
+ // We also need the shared string table.
+ debug_offsets[i] = offset;
+ // Read it from the file.
+ lseek(file_->Fd(), section_header.sh_offset, SEEK_SET);
+ read(file_->Fd(), jit_elf_image_ + offset, section_header.sh_size);
+ offset += section_header.sh_size;
+ // We also need the extra strings .symtab\0.strtab\0
+ extra_shstrtab_entries = section_header.sh_size;
+ memcpy(jit_elf_image_+offset, ".symtab\0.strtab\0", 16);
+ offset += 16;
+ section_index++;
+ } else if (section_header.sh_flags & SHF_EXECINSTR) {
+ DCHECK(strcmp(".text", all.GetString(SHT_SYMTAB,
+ section_header.sh_name)) == 0);
+ text_header = &section_header;
+ text_section_index = section_index++;
+ }
+ }
+ DCHECK(text_header != nullptr);
+ DCHECK_NE(extra_shstrtab_entries, -1);
+
+ // We now need to update the addresses for debug_info and debug_frame to get to the
+ // correct offset within the .text section.
+ byte *p = jit_elf_image_+debug_offsets[8];
+ byte *end = p + all.GetSectionHeader(8).sh_size;
+
+  // For debug_info: patch the compilation unit's low_pc at offset 13 and high_pc at offset 17.
+ IncrementUint32(p + 13, text_start_addr);
+ IncrementUint32(p + 17, text_start_addr);
+
+ // Now fix the low_pc, high_pc for each method address.
+ // First method starts at offset 0x15, each subsequent method is 1+3*4 bytes further.
+ for (p += 0x15; p < end; p += 1 /* attr# */ + 3 * sizeof(uint32_t) /* addresses */) {
+ IncrementUint32(p + 1 + sizeof(uint32_t), text_start_addr);
+ IncrementUint32(p + 1 + 2 * sizeof(uint32_t), text_start_addr);
+ }
+
+  // Now we have to handle the debug_frame method start addresses.
+ p = jit_elf_image_+debug_offsets[10];
+ end = p + all.GetSectionHeader(10).sh_size;
+
+ // Skip past the CIE.
+ p += *reinterpret_cast<uint32_t *>(p) + 4;
+
+ // And walk the FDEs.
+ for (; p < end; p += *reinterpret_cast<uint32_t *>(p) + sizeof(uint32_t)) {
+ IncrementUint32(p + 2 * sizeof(uint32_t), text_start_addr);
+ }
+
+ // Create the data for the symbol table.
+ const int kSymbtabAlignment = 16;
+ RoundAndClear(jit_elf_image_, offset, kSymbtabAlignment);
+ uint32_t symtab_offset = offset;
+
+ // First entry is empty.
+ memset(jit_elf_image_+offset, 0, sizeof(Elf32_Sym));
+ offset += sizeof(Elf32_Sym);
+
+ // Symbol 1 is the real .text section.
+ Elf32_Sym& sym_ent = *reinterpret_cast<Elf32_Sym*>(jit_elf_image_+offset);
+ sym_ent.st_name = 1; /* .text */
+ sym_ent.st_value = text_start_addr;
+ sym_ent.st_size = text_header->sh_size;
+ SetBindingAndType(&sym_ent, STB_LOCAL, STT_SECTION);
+ sym_ent.st_other = 0;
+ sym_ent.st_shndx = text_section_index;
+ offset += sizeof(Elf32_Sym);
+
+ // Create the data for the string table.
+ RoundAndClear(jit_elf_image_, offset, kSymbtabAlignment);
+ const int kTextStringSize = 7;
+ uint32_t strtab_offset = offset;
+ memcpy(jit_elf_image_+offset, "\0.text", kTextStringSize);
+ offset += kTextStringSize;
+
+ // Create the section header table.
+ // Round up to multiple of kSymbtabAlignment, ensuring zero fill.
+ RoundAndClear(jit_elf_image_, offset, kSymbtabAlignment);
+ elf_hdr.e_shoff = offset;
+ Elf32_Shdr *sp =
+ reinterpret_cast<Elf32_Shdr *>(jit_elf_image_ + offset);
+
+ // Copy the first empty index.
+ *sp = all.GetSectionHeader(0);
+ BUMP_SHENT(sp);
+
+ elf_hdr.e_shnum = 1;
+ for (Elf32_Word i = 1; i < kExpectedSectionsInOATFile; i++) {
+ Elf32_Shdr& section_header = all.GetSectionHeader(i);
+ if (section_header.sh_addr == 0 && section_header.sh_type != SHT_DYNSYM) {
+ // Debug section: we need it.
+ *sp = section_header;
+ sp->sh_offset = debug_offsets[i];
+ sp->sh_addr = 0;
+ elf_hdr.e_shnum++;
+ BUMP_SHENT(sp);
+ } else if (section_header.sh_type == SHT_STRTAB &&
+ strcmp(".shstrtab",
+ all.GetString(SHT_SYMTAB, section_header.sh_name)) == 0) {
+ // We also need the shared string table.
+ *sp = section_header;
+ sp->sh_offset = debug_offsets[i];
+ sp->sh_size += 16; /* sizeof ".symtab\0.strtab\0" */
+ sp->sh_addr = 0;
+ elf_hdr.e_shstrndx = elf_hdr.e_shnum;
+ elf_hdr.e_shnum++;
+ BUMP_SHENT(sp);
+ }
+ }
+
+ // Add a .text section for the matching code section.
+ *sp = *text_header;
+ sp->sh_type = SHT_NOBITS;
+ sp->sh_offset = 0;
+ sp->sh_addr = text_start_addr;
+ elf_hdr.e_shnum++;
+ BUMP_SHENT(sp);
+
+ // .symtab section: Need an empty index and the .text entry
+ sp->sh_name = extra_shstrtab_entries;
+ sp->sh_type = SHT_SYMTAB;
+ sp->sh_flags = 0;
+ sp->sh_addr = 0;
+ sp->sh_offset = symtab_offset;
+ sp->sh_size = 2 * sizeof(Elf32_Sym);
+ sp->sh_link = elf_hdr.e_shnum + 1; // Link to .strtab section.
+ sp->sh_info = 0;
+ sp->sh_addralign = 16;
+ sp->sh_entsize = sizeof(Elf32_Sym);
+ elf_hdr.e_shnum++;
+ BUMP_SHENT(sp);
+
+ // .strtab section: Enough for .text\0.
+ sp->sh_name = extra_shstrtab_entries + 8;
+ sp->sh_type = SHT_STRTAB;
+ sp->sh_flags = 0;
+ sp->sh_addr = 0;
+ sp->sh_offset = strtab_offset;
+ sp->sh_size = kTextStringSize;
+ sp->sh_link = 0;
+ sp->sh_info = 0;
+ sp->sh_addralign = 16;
+ sp->sh_entsize = 0;
+ elf_hdr.e_shnum++;
+ BUMP_SHENT(sp);
+
+ // We now have enough information to tell GDB about our file.
+ jit_gdb_entry_ = CreateCodeEntry(jit_elf_image_, offset);
+}
+
} // namespace art
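
A note on the CreateCodeEntry call above: it hands the in-memory ELF image to GDB's JIT interface. That interface is documented GDB behavior and is not shown in this patch; a minimal standalone sketch of the registration protocol (struct and function names per the GDB manual; the RegisterElfImage helper is an assumption about what CreateCodeEntry does) looks like this:

#include <stdint.h>

extern "C" {
typedef enum { JIT_NOACTION = 0, JIT_REGISTER_FN, JIT_UNREGISTER_FN } jit_actions_t;

struct jit_code_entry {
  jit_code_entry* next_entry;
  jit_code_entry* prev_entry;
  const char* symfile_addr;  // The in-memory ELF image built above.
  uint64_t symfile_size;
};

struct jit_descriptor {
  uint32_t version;  // Always 1.
  uint32_t action_flag;
  jit_code_entry* relevant_entry;
  jit_code_entry* first_entry;
};

// GDB plants a breakpoint on this function; the body must stay empty and
// must not be inlined away.
void __attribute__((noinline)) __jit_debug_register_code() { asm(""); }

jit_descriptor __jit_debug_descriptor = { 1, JIT_NOACTION, nullptr, nullptr };
}  // extern "C"

// Hypothetical helper: link a freshly built ELF image into the descriptor
// list and notify the debugger, as CreateCodeEntry presumably does.
jit_code_entry* RegisterElfImage(const char* image, uint64_t size) {
  jit_code_entry* entry = new jit_code_entry;
  entry->symfile_addr = image;
  entry->symfile_size = size;
  entry->prev_entry = nullptr;
  entry->next_entry = __jit_debug_descriptor.first_entry;
  if (entry->next_entry != nullptr) {
    entry->next_entry->prev_entry = entry;
  }
  __jit_debug_descriptor.first_entry = entry;
  __jit_debug_descriptor.relevant_entry = entry;
  __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
  __jit_debug_register_code();  // Notify the attached debugger.
  return entry;
}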
diff --git a/runtime/elf_file.h b/runtime/elf_file.h
index 8a0a5f84c4..d2a044e9cf 100644
--- a/runtime/elf_file.h
+++ b/runtime/elf_file.h
@@ -29,6 +29,12 @@
namespace art {
+// Interface to GDB JIT for backtrace information.
+extern "C" {
+ struct JITCodeEntry;
+}
+
+
// Used for compile time and runtime for ElfFile access. Because of
// the need for use at runtime, cannot directly use LLVM classes such as
// ELFObjectFile.
@@ -171,6 +177,13 @@ class ElfFile {
SymbolTable* symtab_symbol_table_;
SymbolTable* dynsym_symbol_table_;
+
+ // Support for GDB JIT
+ byte* jit_elf_image_;
+ JITCodeEntry* jit_gdb_entry_;
+ void GdbJITSupport();
+  // Used to check whether this is an OAT file with debug information in it.
+ static constexpr uint32_t kExpectedSectionsInOATFile = 12;
};
} // namespace art
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index 2ced942240..a8fb6c14a1 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -733,6 +733,11 @@ static inline const void* GetQuickToInterpreterBridge() {
return reinterpret_cast<void*>(art_quick_to_interpreter_bridge);
}
+extern "C" void art_quick_generic_jni_trampoline(mirror::ArtMethod*);
+static inline const void* GetQuickGenericJniTrampoline() {
+ return reinterpret_cast<void*>(art_quick_generic_jni_trampoline);
+}
+
static inline const void* GetQuickToPortableBridge() {
// TODO: quick to portable bridge. Bug: 8196384
return GetQuickToInterpreterBridge();
@@ -754,6 +759,10 @@ static inline const void* GetQuickImtConflictTrampoline(ClassLinker* class_linke
return class_linker->GetQuickImtConflictTrampoline();
}
+static inline const void* GetQuickGenericJniTrampoline(ClassLinker* class_linker) {
+ return class_linker->GetQuickGenericJniTrampoline();
+}
+
extern "C" void art_portable_proxy_invoke_handler();
static inline const void* GetPortableProxyInvokeHandler() {
return reinterpret_cast<void*>(art_portable_proxy_invoke_handler);
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
index 2e1b69d236..ccc0f3ded5 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "entrypoints/quick/quick_alloc_entrypoints.h"
+
#include "callee_save_frame.h"
#include "entrypoints/entrypoint_utils.h"
#include "mirror/art_method-inl.h"
@@ -104,4 +106,90 @@ GENERATE_ENTRYPOINTS_FOR_ALLOCATOR(RosAlloc, gc::kAllocatorTypeRosAlloc)
GENERATE_ENTRYPOINTS_FOR_ALLOCATOR(BumpPointer, gc::kAllocatorTypeBumpPointer)
GENERATE_ENTRYPOINTS_FOR_ALLOCATOR(TLAB, gc::kAllocatorTypeTLAB)
+#define GENERATE_ENTRYPOINTS(suffix) \
+extern "C" void* art_quick_alloc_array##suffix(uint32_t, void*, int32_t); \
+extern "C" void* art_quick_alloc_array_resolved##suffix(void* klass, void*, int32_t); \
+extern "C" void* art_quick_alloc_array_with_access_check##suffix(uint32_t, void*, int32_t); \
+extern "C" void* art_quick_alloc_object##suffix(uint32_t type_idx, void* method); \
+extern "C" void* art_quick_alloc_object_resolved##suffix(void* klass, void* method); \
+extern "C" void* art_quick_alloc_object_initialized##suffix(void* klass, void* method); \
+extern "C" void* art_quick_alloc_object_with_access_check##suffix(uint32_t type_idx, void* method); \
+extern "C" void* art_quick_check_and_alloc_array##suffix(uint32_t, void*, int32_t); \
+extern "C" void* art_quick_check_and_alloc_array_with_access_check##suffix(uint32_t, void*, int32_t); \
+extern "C" void* art_quick_alloc_array##suffix##_instrumented(uint32_t, void*, int32_t); \
+extern "C" void* art_quick_alloc_array_resolved##suffix##_instrumented(void* klass, void*, int32_t); \
+extern "C" void* art_quick_alloc_array_with_access_check##suffix##_instrumented(uint32_t, void*, int32_t); \
+extern "C" void* art_quick_alloc_object##suffix##_instrumented(uint32_t type_idx, void* method); \
+extern "C" void* art_quick_alloc_object_resolved##suffix##_instrumented(void* klass, void* method); \
+extern "C" void* art_quick_alloc_object_initialized##suffix##_instrumented(void* klass, void* method); \
+extern "C" void* art_quick_alloc_object_with_access_check##suffix##_instrumented(uint32_t type_idx, void* method); \
+extern "C" void* art_quick_check_and_alloc_array##suffix##_instrumented(uint32_t, void*, int32_t); \
+extern "C" void* art_quick_check_and_alloc_array_with_access_check##suffix##_instrumented(uint32_t, void*, int32_t); \
+void SetQuickAllocEntryPoints##suffix(QuickEntryPoints* qpoints, bool instrumented) { \
+ if (instrumented) { \
+ qpoints->pAllocArray = art_quick_alloc_array##suffix##_instrumented; \
+ qpoints->pAllocArrayResolved = art_quick_alloc_array_resolved##suffix##_instrumented; \
+ qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check##suffix##_instrumented; \
+ qpoints->pAllocObject = art_quick_alloc_object##suffix##_instrumented; \
+ qpoints->pAllocObjectResolved = art_quick_alloc_object_resolved##suffix##_instrumented; \
+ qpoints->pAllocObjectInitialized = art_quick_alloc_object_initialized##suffix##_instrumented; \
+ qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check##suffix##_instrumented; \
+ qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array##suffix##_instrumented; \
+ qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check##suffix##_instrumented; \
+ } else { \
+ qpoints->pAllocArray = art_quick_alloc_array##suffix; \
+ qpoints->pAllocArrayResolved = art_quick_alloc_array_resolved##suffix; \
+ qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check##suffix; \
+ qpoints->pAllocObject = art_quick_alloc_object##suffix; \
+ qpoints->pAllocObjectResolved = art_quick_alloc_object_resolved##suffix; \
+ qpoints->pAllocObjectInitialized = art_quick_alloc_object_initialized##suffix; \
+ qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check##suffix; \
+ qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array##suffix; \
+ qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check##suffix; \
+ } \
+}
+
+// Generate the entrypoint functions.
+GENERATE_ENTRYPOINTS(_dlmalloc);
+GENERATE_ENTRYPOINTS(_rosalloc);
+GENERATE_ENTRYPOINTS(_bump_pointer);
+GENERATE_ENTRYPOINTS(_tlab);
+
+static bool entry_points_instrumented = false;
+static gc::AllocatorType entry_points_allocator = gc::kAllocatorTypeDlMalloc;
+
+void SetQuickAllocEntryPointsAllocator(gc::AllocatorType allocator) {
+ entry_points_allocator = allocator;
+}
+
+void SetQuickAllocEntryPointsInstrumented(bool instrumented) {
+ entry_points_instrumented = instrumented;
+}
+
+void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints) {
+ switch (entry_points_allocator) {
+ case gc::kAllocatorTypeDlMalloc: {
+ SetQuickAllocEntryPoints_dlmalloc(qpoints, entry_points_instrumented);
+ break;
+ }
+ case gc::kAllocatorTypeRosAlloc: {
+ SetQuickAllocEntryPoints_rosalloc(qpoints, entry_points_instrumented);
+ break;
+ }
+ case gc::kAllocatorTypeBumpPointer: {
+ CHECK(kMovingCollector);
+ SetQuickAllocEntryPoints_bump_pointer(qpoints, entry_points_instrumented);
+ break;
+ }
+ case gc::kAllocatorTypeTLAB: {
+ CHECK(kMovingCollector);
+ SetQuickAllocEntryPoints_tlab(qpoints, entry_points_instrumented);
+ break;
+ }
+ default: {
+ LOG(FATAL) << "Unimplemented";
+ }
+ }
+}
+
} // namespace art
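
For reference, the GENERATE_ENTRYPOINTS(suffix) macro above stamps out one declaration pair and one branch per allocation entrypoint. Trimmed to a single slot, GENERATE_ENTRYPOINTS(_dlmalloc) expands roughly to:

extern "C" void* art_quick_alloc_object_dlmalloc(uint32_t type_idx, void* method);
extern "C" void* art_quick_alloc_object_dlmalloc_instrumented(uint32_t type_idx, void* method);

void SetQuickAllocEntryPoints_dlmalloc(QuickEntryPoints* qpoints, bool instrumented) {
  if (instrumented) {
    qpoints->pAllocObject = art_quick_alloc_object_dlmalloc_instrumented;
  } else {
    qpoints->pAllocObject = art_quick_alloc_object_dlmalloc;
  }
}

ResetQuickAllocEntryPoints then picks the SetQuickAllocEntryPoints_* variant matching the current allocator, so switching the heap allocator or toggling instrumentation is a single table rewrite.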
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.h b/runtime/entrypoints/quick/quick_alloc_entrypoints.h
new file mode 100644
index 0000000000..7fd3fe9040
--- /dev/null
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ALLOC_ENTRYPOINTS_H_
+#define ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ALLOC_ENTRYPOINTS_H_
+
+#include "gc/heap.h"
+#include "quick_entrypoints.h"
+
+namespace art {
+
+namespace gc {
+enum AllocatorType;
+} // namespace gc
+
+void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints);
+
+// Runtime shutdown lock is necessary to prevent races in thread initialization. When the thread is
+// starting it doesn't hold the mutator lock until after it has been added to the thread list.
+// However, Thread::Init is guarded by the runtime shutdown lock, so we can prevent these races by
+// holding the runtime shutdown lock and the mutator lock when we update the entrypoints.
+
+void SetQuickAllocEntryPointsAllocator(gc::AllocatorType allocator)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::runtime_shutdown_lock_);
+
+void SetQuickAllocEntryPointsInstrumented(bool instrumented)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::runtime_shutdown_lock_);
+
+} // namespace art
+
+#endif // ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ALLOC_ENTRYPOINTS_H_
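
The locking contract above is exercised by Heap::ChangeAllocator and by Instrumentation::InstrumentQuickAllocEntryPoints / UninstrumentQuickAllocEntryPoints later in this patch. The intended call shape, as a sketch in ART context mirroring those call sites:

// All mutators are suspended first, then the runtime shutdown lock is taken,
// so no thread can be mid-initialization while the entrypoints are swapped.
ThreadList* tl = Runtime::Current()->GetThreadList();
tl->SuspendAll();
{
  MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
  SetQuickAllocEntryPointsInstrumented(true);
  Runtime::Current()->GetInstrumentation()->ResetQuickAllocEntryPoints();
}
tl->ResumeAll();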
diff --git a/runtime/entrypoints/quick/quick_entrypoints.h b/runtime/entrypoints/quick/quick_entrypoints.h
index 011e92693e..5c3b8243ff 100644
--- a/runtime/entrypoints/quick/quick_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_entrypoints.h
@@ -87,6 +87,7 @@ struct PACKED(4) QuickEntryPoints {
mirror::Object* (*pJniMethodEndWithReference)(jobject result, uint32_t cookie, Thread* self);
mirror::Object* (*pJniMethodEndWithReferenceSynchronized)(jobject result, uint32_t cookie,
jobject locked, Thread* self);
+ void (*pQuickGenericJniTrampoline)(mirror::ArtMethod*);
// Locks
void (*pLockObject)(void*);
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index 59da7a05cb..737fa3e735 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -92,6 +92,7 @@ extern mirror::Object* JniMethodEndWithReference(jobject result, uint32_t saved_
}
CheckReferenceResult(o, self);
}
+ VerifyObject(o);
return o;
}
@@ -109,6 +110,7 @@ extern mirror::Object* JniMethodEndWithReferenceSynchronized(jobject result,
}
CheckReferenceResult(o, self);
}
+ VerifyObject(o);
return o;
}
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 5339b5ea1f..ef40be825a 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -817,4 +817,13 @@ extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called,
return code;
}
+extern "C" const void* artQuickGenericJniTrampoline(mirror::ArtMethod* called,
+ mirror::Object* receiver,
+ Thread* thread, mirror::ArtMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ LOG(FATAL) << "artQuickGenericJniTrampoline not implemented: "
+ << PrettyMethod(called);
+ return NULL;
+}
+
} // namespace art
diff --git a/runtime/gc/accounting/card_table-inl.h b/runtime/gc/accounting/card_table-inl.h
index f0c4d0d788..564168e274 100644
--- a/runtime/gc/accounting/card_table-inl.h
+++ b/runtime/gc/accounting/card_table-inl.h
@@ -154,26 +154,31 @@ inline void CardTable::ModifyCardsAtomic(byte* scan_begin, byte* scan_end, const
// Now we have the words, we can process words in parallel.
uintptr_t* word_cur = reinterpret_cast<uintptr_t*>(card_cur);
uintptr_t* word_end = reinterpret_cast<uintptr_t*>(card_end);
- uintptr_t expected_word;
- uintptr_t new_word;
+ // TODO: This is not big endian safe.
+ union {
+ uintptr_t expected_word;
+ uint8_t expected_bytes[sizeof(uintptr_t)];
+ };
+ union {
+ uintptr_t new_word;
+ uint8_t new_bytes[sizeof(uintptr_t)];
+ };
// TODO: Parallelize.
while (word_cur < word_end) {
- while ((expected_word = *word_cur) != 0) {
- new_word =
- (visitor((expected_word >> 0) & 0xFF) << 0) |
- (visitor((expected_word >> 8) & 0xFF) << 8) |
- (visitor((expected_word >> 16) & 0xFF) << 16) |
- (visitor((expected_word >> 24) & 0xFF) << 24);
- if (new_word == expected_word) {
- // No need to do a cas.
+ while (true) {
+ expected_word = *word_cur;
+ if (LIKELY(expected_word == 0)) {
break;
}
+ for (size_t i = 0; i < sizeof(uintptr_t); ++i) {
+ new_bytes[i] = visitor(expected_bytes[i]);
+ }
if (LIKELY(android_atomic_cas(expected_word, new_word,
reinterpret_cast<int32_t*>(word_cur)) == 0)) {
for (size_t i = 0; i < sizeof(uintptr_t); ++i) {
- const byte expected_byte = (expected_word >> (8 * i)) & 0xFF;
- const byte new_byte = (new_word >> (8 * i)) & 0xFF;
+ const byte expected_byte = expected_bytes[i];
+ const byte new_byte = new_bytes[i];
if (expected_byte != new_byte) {
modified(reinterpret_cast<byte*>(word_cur) + i, expected_byte, new_byte);
}
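
The union rewrite above keeps the CAS loop but indexes bytes through memory instead of shifts, which is why the TODO flags it as little-endian only: expected_bytes[i] aliases bits 8*i..8*i+7 of expected_word only on little-endian machines. A standalone demonstration, type-punning through a union just as the patch does:

#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
  union {
    uint32_t word;
    uint8_t bytes[sizeof(uint32_t)];
  } u;
  u.word = 0x04030201;
  for (size_t i = 0; i < sizeof(uint32_t); ++i) {
    // On little-endian both columns match (01 02 03 04); on big-endian the
    // byte column is reversed, so the new code would visit card bytes in a
    // different order than the old shift-based code.
    std::printf("bytes[%zu]=%02x shifted=%02x\n", i, u.bytes[i],
                static_cast<uint8_t>((u.word >> (8 * i)) & 0xFF));
  }
  return 0;
}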
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 5b4ca80b0d..dd2bb5ded0 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -439,9 +439,6 @@ class RosAlloc {
hash_set<Run*, hash_run, eq_run> full_runs_[kNumOfSizeBrackets];
// The set of free pages.
std::set<FreePageRun*> free_page_runs_ GUARDED_BY(lock_);
- // The free page run whose end address is the end of the memory
- // region that's managed by this allocator, if any.
- FreePageRun* last_free_page_run_;
// The current runs where the allocations are first attempted for
// the size brackets that do not use thread-local
// runs. current_runs_[i] is guarded by size_bracket_locks_[i].
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 7b2bc3b9de..c39e56fd2d 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -861,7 +861,7 @@ void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) {
size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining);
mark_stack_end -= mark_stack_increment;
mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment));
- DCHECK_EQ(mark_stack_end, mark_stack_->End());
+ DCHECK_EQ(mark_stack_end, const_cast<const art::mirror::Object **>(mark_stack_->End()));
// Add the new task to the thread pool.
auto* task = new CardScanTask(thread_pool, this, space->GetMarkBitmap(), card_begin,
card_begin + card_increment, minimum_age,
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index a4c9deaae6..4668a1985f 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -61,7 +61,8 @@ namespace gc {
namespace collector {
static constexpr bool kProtectFromSpace = true;
-static constexpr bool kResetFromSpace = true;
+static constexpr bool kClearFromSpace = true;
+static constexpr bool kStoreStackTraces = false;
// TODO: Unduplicate logic.
void SemiSpace::ImmuneSpace(space::ContinuousSpace* space) {
@@ -169,6 +170,19 @@ void SemiSpace::ProcessReferences(Thread* self) {
}
void SemiSpace::MarkingPhase() {
+ if (kStoreStackTraces) {
+ Locks::mutator_lock_->AssertExclusiveHeld(self_);
+ // Store the stack traces into the runtime fault string in case we get a heap corruption
+ // related crash later.
+ ThreadState old_state = self_->SetStateUnsafe(kRunnable);
+ std::ostringstream oss;
+ Runtime* runtime = Runtime::Current();
+ runtime->GetThreadList()->DumpForSigQuit(oss);
+ runtime->GetThreadList()->DumpNativeStacks(oss);
+ runtime->SetFaultMessage(oss.str());
+ CHECK_EQ(self_->SetStateUnsafe(old_state), kRunnable);
+ }
+
if (generational_) {
if (gc_cause_ == kGcCauseExplicit || gc_cause_ == kGcCauseForNativeAlloc ||
clear_soft_references_) {
@@ -353,19 +367,17 @@ void SemiSpace::ReclaimPhase() {
TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
GetHeap()->UnBindBitmaps();
}
- // Release the memory used by the from space.
- if (kResetFromSpace) {
- // Clearing from space.
+ if (kClearFromSpace) {
+ // Release the memory used by the from space.
from_space_->Clear();
}
+ from_space_->Reset();
// Protect the from space.
- VLOG(heap)
- << "mprotect region " << reinterpret_cast<void*>(from_space_->Begin()) << " - "
- << reinterpret_cast<void*>(from_space_->Limit());
+ VLOG(heap) << "Protecting space " << *from_space_;
if (kProtectFromSpace) {
- mprotect(from_space_->Begin(), from_space_->Capacity(), PROT_NONE);
+ from_space_->GetMemMap()->Protect(PROT_NONE);
} else {
- mprotect(from_space_->Begin(), from_space_->Capacity(), PROT_READ);
+ from_space_->GetMemMap()->Protect(PROT_READ);
}
if (saved_bytes_ > 0) {
VLOG(heap) << "Avoided dirtying " << PrettySize(saved_bytes_);
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 2e6d2c29b6..fc591e76f9 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -45,6 +45,7 @@
#include "gc/space/rosalloc_space-inl.h"
#include "gc/space/space-inl.h"
#include "gc/space/zygote_space.h"
+#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "heap-inl.h"
#include "image.h"
#include "invoke_arg_array_builder.h"
@@ -65,8 +66,6 @@
namespace art {
-extern void SetQuickAllocEntryPointsAllocator(gc::AllocatorType allocator);
-
namespace gc {
static constexpr bool kGCALotMode = false;
@@ -308,16 +307,102 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
}
void Heap::ChangeAllocator(AllocatorType allocator) {
- // These two allocators are only used internally and don't have any entrypoints.
- DCHECK_NE(allocator, kAllocatorTypeLOS);
- DCHECK_NE(allocator, kAllocatorTypeNonMoving);
if (current_allocator_ != allocator) {
+ // These two allocators are only used internally and don't have any entrypoints.
+ CHECK_NE(allocator, kAllocatorTypeLOS);
+ CHECK_NE(allocator, kAllocatorTypeNonMoving);
current_allocator_ = allocator;
+ MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
SetQuickAllocEntryPointsAllocator(current_allocator_);
Runtime::Current()->GetInstrumentation()->ResetQuickAllocEntryPoints();
}
}
+std::string Heap::SafeGetClassDescriptor(mirror::Class* klass) {
+ if (!IsValidContinuousSpaceObjectAddress(klass)) {
+ return StringPrintf("<non heap address klass %p>", klass);
+ }
+ mirror::Class* component_type = klass->GetComponentType<kVerifyNone>();
+ if (IsValidContinuousSpaceObjectAddress(component_type) && klass->IsArrayClass<kVerifyNone>()) {
+ std::string result("[");
+ result += SafeGetClassDescriptor(component_type);
+ return result;
+ } else if (UNLIKELY(klass->IsPrimitive<kVerifyNone>())) {
+ return Primitive::Descriptor(klass->GetPrimitiveType<kVerifyNone>());
+ } else if (UNLIKELY(klass->IsProxyClass<kVerifyNone>())) {
+ return Runtime::Current()->GetClassLinker()->GetDescriptorForProxy(klass);
+ } else {
+ mirror::DexCache* dex_cache = klass->GetDexCache<kVerifyNone>();
+ if (!IsValidContinuousSpaceObjectAddress(dex_cache)) {
+ return StringPrintf("<non heap address dex_cache %p>", dex_cache);
+ }
+ const DexFile* dex_file = dex_cache->GetDexFile();
+ uint16_t class_def_idx = klass->GetDexClassDefIndex();
+ if (class_def_idx == DexFile::kDexNoIndex16) {
+ return "<class def not found>";
+ }
+ const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_idx);
+ const DexFile::TypeId& type_id = dex_file->GetTypeId(class_def.class_idx_);
+ return dex_file->GetTypeDescriptor(type_id);
+ }
+}
+
+std::string Heap::SafePrettyTypeOf(mirror::Object* obj) {
+ if (obj == nullptr) {
+ return "null";
+ }
+ mirror::Class* klass = obj->GetClass<kVerifyNone>();
+ if (klass == nullptr) {
+ return "(class=null)";
+ }
+ std::string result(SafeGetClassDescriptor(klass));
+ if (obj->IsClass()) {
+ result += "<" + SafeGetClassDescriptor(obj->AsClass<kVerifyNone>()) + ">";
+ }
+ return result;
+}
+
+void Heap::DumpObject(std::ostream& stream, mirror::Object* obj) {
+ if (obj == nullptr) {
+ stream << "(obj=null)";
+ return;
+ }
+ if (IsAligned<kObjectAlignment>(obj)) {
+ space::Space* space = nullptr;
+    // Don't use FindSpaceFromObject since it only finds spaces which actually contain objects
+    // instead of spaces which may contain objects (e.g. cleared bump pointer spaces).
+ for (const auto& cur_space : continuous_spaces_) {
+ if (cur_space->HasAddress(obj)) {
+ space = cur_space;
+ break;
+ }
+ }
+ if (space == nullptr) {
+ if (allocator_mem_map_.get() == nullptr || !allocator_mem_map_->HasAddress(obj)) {
+ stream << "obj " << obj << " not a valid heap address";
+ return;
+ } else if (allocator_mem_map_.get() != nullptr) {
+ allocator_mem_map_->Protect(PROT_READ | PROT_WRITE);
+ }
+ }
+ // Unprotect all the spaces.
+ for (const auto& space : continuous_spaces_) {
+ mprotect(space->Begin(), space->Capacity(), PROT_READ | PROT_WRITE);
+ }
+ stream << "Object " << obj;
+ if (space != nullptr) {
+ stream << " in space " << *space;
+ }
+ mirror::Class* klass = obj->GetClass<kVerifyNone>();
+ stream << "\nclass=" << klass;
+ if (klass != nullptr) {
+ stream << " type= " << SafePrettyTypeOf(obj);
+ }
+ // Re-protect the address we faulted on.
+ mprotect(AlignDown(obj, kPageSize), kPageSize, PROT_NONE);
+ }
+}
+
bool Heap::IsCompilingBoot() const {
for (const auto& space : continuous_spaces_) {
if (space->IsImageSpace() || space->IsZygoteSpace()) {
@@ -809,16 +894,23 @@ bool Heap::IsValidObjectAddress(const mirror::Object* obj) const {
if (obj == nullptr) {
return true;
}
- return IsAligned<kObjectAlignment>(obj) && IsHeapAddress(obj);
+ return IsAligned<kObjectAlignment>(obj) && FindSpaceFromObject(obj, true) != nullptr;
}
bool Heap::IsNonDiscontinuousSpaceHeapAddress(const mirror::Object* obj) const {
return FindContinuousSpaceFromObject(obj, true) != nullptr;
}
-bool Heap::IsHeapAddress(const mirror::Object* obj) const {
- // TODO: This might not work for large objects.
- return FindSpaceFromObject(obj, true) != nullptr;
+bool Heap::IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const {
+ if (obj == nullptr || !IsAligned<kObjectAlignment>(obj)) {
+ return false;
+ }
+ for (const auto& space : continuous_spaces_) {
+ if (space->HasAddress(obj)) {
+ return true;
+ }
+ }
+ return false;
}
bool Heap::IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack,
@@ -1539,6 +1631,7 @@ void Heap::MarkAllocStack(accounting::SpaceBitmap* bitmap1,
void Heap::SwapSemiSpaces() {
// Swap the spaces so we allocate into the space which we just evacuated.
std::swap(bump_pointer_space_, temp_space_);
+ bump_pointer_space_->Clear();
}
void Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
@@ -1616,7 +1709,7 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus
CHECK(temp_space_->IsEmpty());
semi_space_collector_->SetFromSpace(bump_pointer_space_);
semi_space_collector_->SetToSpace(temp_space_);
- mprotect(temp_space_->Begin(), temp_space_->Capacity(), PROT_READ | PROT_WRITE);
+ temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
collector = semi_space_collector_;
gc_type = collector::kGcTypeFull;
} else if (current_allocator_ == kAllocatorTypeRosAlloc ||
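
SafeGetClassDescriptor above rebuilds standard Dalvik type descriptors ("I", "Ljava/lang/String;", one leading '[' per array dimension) while trusting as little of a possibly-corrupt object graph as it can. The descriptor shape itself, illustrated with a hypothetical helper over plain strings (not an ART API):

#include <cassert>
#include <string>

// Illustrative only: prepend one '[' per array dimension, mirroring the
// recursion over GetComponentType in SafeGetClassDescriptor.
std::string ArrayDescriptor(const std::string& element, int dims) {
  return std::string(dims, '[') + element;
}

int main() {
  assert(ArrayDescriptor("I", 1) == "[I");  // int[]
  assert(ArrayDescriptor("Ljava/lang/String;", 2) == "[[Ljava/lang/String;");
  return 0;
}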
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 2f227d0d37..88adf811c5 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -197,13 +197,16 @@ class Heap {
void RegisterNativeFree(JNIEnv* env, int bytes);
// Change the allocator, updates entrypoints.
- void ChangeAllocator(AllocatorType allocator);
+ void ChangeAllocator(AllocatorType allocator)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
+ LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
// Transition the garbage collector during runtime, may copy objects from one space to another.
void TransitionCollector(CollectorType collector_type);
// Change the collector to be one of the possible options (MS, CMS, SS).
- void ChangeCollector(CollectorType collector_type);
+ void ChangeCollector(CollectorType collector_type)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
// The given reference is believed to be to an object in the Java heap, check the soundness of it.
// TODO: NO_THREAD_SAFETY_ANALYSIS since we call this everywhere and it is impossible to find a
@@ -223,9 +226,6 @@ class Heap {
bool IsValidObjectAddress(const mirror::Object* obj) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Returns true if the address passed in is a heap address, doesn't need to be aligned.
- bool IsHeapAddress(const mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
// Faster alternative to IsHeapAddress since finding if an object is in the large object space is
// very slow.
bool IsNonDiscontinuousSpaceHeapAddress(const mirror::Object* obj) const
@@ -468,7 +468,8 @@ class Heap {
// Revoke all the thread-local allocation stacks.
void RevokeAllThreadLocalAllocationStacks(Thread* self)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
+ LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_, Locks::thread_list_lock_);
// Mark all the objects in the allocation stack in the specified bitmap.
void MarkAllocStack(accounting::SpaceBitmap* bitmap1, accounting::SpaceBitmap* bitmap2,
@@ -519,6 +520,12 @@ class Heap {
void DumpSpaces(std::ostream& stream = LOG(INFO));
+  // DumpObject should only be used by the signal handler.
+ void DumpObject(std::ostream& stream, mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
+  // Safe version of PrettyTypeOf which checks that objects are heap addresses.
+ std::string SafeGetClassDescriptor(mirror::Class* klass) NO_THREAD_SAFETY_ANALYSIS;
+ std::string SafePrettyTypeOf(mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
+
// GC performance measuring
void DumpGcPerformanceInfo(std::ostream& os);
@@ -600,6 +607,10 @@ class Heap {
template <bool kGrow>
bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size);
+ // Returns true if the address passed in is within the address range of a continuous space.
+ bool IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// Pushes a list of cleared references out to the managed heap.
void SetReferenceReferent(mirror::Object* reference, mirror::Object* referent)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
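
The annotations added to heap.h are macros over Clang's thread-safety attributes, so these contracts are compiler-checked. A minimal standalone analogue using the raw attributes (ART's macro spellings are assumed to map onto these; check with clang++ -Wthread-safety):

class __attribute__((capability("mutex"))) Mutex {
 public:
  void Lock() __attribute__((acquire_capability())) {}
  void Unlock() __attribute__((release_capability())) {}
};

Mutex shutdown_lock;
int allocator __attribute__((guarded_by(shutdown_lock))) = 0;

// Analogue of EXCLUSIVE_LOCKS_REQUIRED: the caller must already hold the lock.
void SetAllocator(int a) __attribute__((requires_capability(shutdown_lock))) {
  allocator = a;
}

// Analogue of LOCKS_EXCLUDED: the caller must not hold the lock on entry.
void ChangeAllocator(int a) __attribute__((locks_excluded(shutdown_lock))) {
  shutdown_lock.Lock();
  SetAllocator(a);  // OK: lock held here; calling without it would warn.
  shutdown_lock.Unlock();
}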
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index 43674eacff..fcd3b70085 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -61,6 +61,9 @@ BumpPointerSpace::BumpPointerSpace(const std::string& name, MemMap* mem_map)
void BumpPointerSpace::Clear() {
// Release the pages back to the operating system.
CHECK_NE(madvise(Begin(), Limit() - Begin(), MADV_DONTNEED), -1) << "madvise failed";
+}
+
+void BumpPointerSpace::Reset() {
// Reset the end of the space back to the beginning, we move the end forward as we allocate
// objects.
SetEnd(Begin());
@@ -75,8 +78,9 @@ void BumpPointerSpace::Clear() {
}
void BumpPointerSpace::Dump(std::ostream& os) const {
- os << reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(End()) << " - "
- << reinterpret_cast<void*>(Limit());
+ os << GetName() << " "
+ << reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(End()) << " - "
+ << reinterpret_cast<void*>(Limit());
}
mirror::Object* BumpPointerSpace::GetNextObject(mirror::Object* obj) {
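
The Clear/Reset split above separates "give the pages back to the OS" from "rewind the bump pointer". The MADV_DONTNEED behavior that Clear relies on can be seen in isolation (a sketch assuming Linux semantics for private anonymous mappings; error handling trimmed):

#include <sys/mman.h>
#include <cassert>
#include <cstring>

int main() {
  const size_t kSize = 16 * 4096;
  void* mem = mmap(nullptr, kSize, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(mem != MAP_FAILED);
  char* space = static_cast<char*>(mem);
  memset(space, 0xAB, kSize);  // Dirty the pages, as allocation would.
  // "Clear": the kernel may reclaim the pages; the mapping stays valid.
  assert(madvise(space, kSize, MADV_DONTNEED) == 0);
  // Reads now see zero-filled pages, which the GC depends on after a swap.
  assert(space[0] == 0);
  munmap(space, kSize);
  return 0;
}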
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 476b8333ff..2c9d35fa55 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -92,8 +92,11 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
return nullptr;
}
- // Clear the memory and reset the pointer to the start of the space.
- void Clear() OVERRIDE LOCKS_EXCLUDED(block_lock_);
+ // Madvise the memory back to the OS.
+ void Clear() OVERRIDE;
+
+ // Reset the pointer to the start of the space.
+ void Reset() OVERRIDE LOCKS_EXCLUDED(block_lock_);
void Dump(std::ostream& os) const;
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index caedaaf1a9..b591486f59 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -281,12 +281,15 @@ uint64_t DlMallocSpace::GetObjectsAllocated() {
}
void DlMallocSpace::Clear() {
- // TODO: Delete and create new mspace here.
madvise(GetMemMap()->Begin(), GetMemMap()->Size(), MADV_DONTNEED);
GetLiveBitmap()->Clear();
GetMarkBitmap()->Clear();
}
+void DlMallocSpace::Reset() {
+ // TODO: Delete and create new mspace here.
+}
+
#ifndef NDEBUG
void DlMallocSpace::CheckMoreCoreForPrecondition() {
lock_.AssertHeld(Thread::Current());
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index 6ea10ad0a4..4bf16cecbe 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -113,6 +113,7 @@ class DlMallocSpace : public MallocSpace {
uint64_t GetObjectsAllocated() OVERRIDE;
void Clear() OVERRIDE;
+ void Reset() OVERRIDE;
bool IsDlMallocSpace() const OVERRIDE {
return true;
diff --git a/runtime/gc/space/dlmalloc_space_test.cc b/runtime/gc/space/dlmalloc_space_base_test.cc
index 964c31bde4..508d86998a 100644
--- a/runtime/gc/space/dlmalloc_space_test.cc
+++ b/runtime/gc/space/dlmalloc_space_base_test.cc
@@ -26,7 +26,7 @@ MallocSpace* CreateDlMallocSpace(const std::string& name, size_t initial_size, s
return DlMallocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin);
}
-TEST_SPACE_CREATE_FN(DlMallocSpace, CreateDlMallocSpace)
+TEST_SPACE_CREATE_FN_BASE(DlMallocSpace, CreateDlMallocSpace)
} // namespace space
diff --git a/runtime/gc/space/dlmalloc_space_random_test.cc b/runtime/gc/space/dlmalloc_space_random_test.cc
new file mode 100644
index 0000000000..43a1bf0c0e
--- /dev/null
+++ b/runtime/gc/space/dlmalloc_space_random_test.cc
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "space_test.h"
+#include "dlmalloc_space.h"
+
+namespace art {
+namespace gc {
+namespace space {
+
+MallocSpace* CreateDlMallocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
+ size_t capacity, byte* requested_begin) {
+ return DlMallocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin);
+}
+
+TEST_SPACE_CREATE_FN_RANDOM(DlMallocSpace, CreateDlMallocSpace)
+
+
+} // namespace space
+} // namespace gc
+} // namespace art
diff --git a/runtime/gc/space/dlmalloc_space_static_test.cc b/runtime/gc/space/dlmalloc_space_static_test.cc
new file mode 100644
index 0000000000..4fbc81e6a1
--- /dev/null
+++ b/runtime/gc/space/dlmalloc_space_static_test.cc
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "space_test.h"
+#include "dlmalloc_space.h"
+
+namespace art {
+namespace gc {
+namespace space {
+
+MallocSpace* CreateDlMallocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
+ size_t capacity, byte* requested_begin) {
+ return DlMallocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin);
+}
+
+TEST_SPACE_CREATE_FN_STATIC(DlMallocSpace, CreateDlMallocSpace)
+
+
+} // namespace space
+} // namespace gc
+} // namespace art
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index fe8421d2b4..fb621ea6e4 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -304,12 +304,15 @@ void RosAllocSpace::RevokeAllThreadLocalBuffers() {
}
void RosAllocSpace::Clear() {
- // TODO: Delete and create new mspace here.
madvise(GetMemMap()->Begin(), GetMemMap()->Size(), MADV_DONTNEED);
GetLiveBitmap()->Clear();
GetMarkBitmap()->Clear();
}
+void RosAllocSpace::Reset() {
+ // TODO: Delete and create new mspace here.
+}
+
} // namespace space
} // namespace gc
} // namespace art
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index bd321967e5..5bc425da63 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -80,6 +80,7 @@ class RosAllocSpace : public MallocSpace {
void SetFootprintLimit(size_t limit) OVERRIDE;
void Clear() OVERRIDE;
+ void Reset() OVERRIDE;
MallocSpace* CreateInstance(const std::string& name, MemMap* mem_map, void* allocator,
byte* begin, byte* end, byte* limit, size_t growth_limit);
diff --git a/runtime/gc/space/rosalloc_space_test.cc b/runtime/gc/space/rosalloc_space_base_test.cc
index 3eac795370..df42076afa 100644
--- a/runtime/gc/space/rosalloc_space_test.cc
+++ b/runtime/gc/space/rosalloc_space_base_test.cc
@@ -26,7 +26,7 @@ MallocSpace* CreateRosAllocSpace(const std::string& name, size_t initial_size, s
Runtime::Current()->GetHeap()->IsLowMemoryMode());
}
-TEST_SPACE_CREATE_FN(RosAllocSpace, CreateRosAllocSpace)
+TEST_SPACE_CREATE_FN_BASE(RosAllocSpace, CreateRosAllocSpace)
} // namespace space
diff --git a/runtime/gc/space/rosalloc_space_random_test.cc b/runtime/gc/space/rosalloc_space_random_test.cc
new file mode 100644
index 0000000000..4d37c9eb01
--- /dev/null
+++ b/runtime/gc/space/rosalloc_space_random_test.cc
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "space_test.h"
+
+namespace art {
+namespace gc {
+namespace space {
+
+MallocSpace* CreateRosAllocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
+ size_t capacity, byte* requested_begin) {
+ return RosAllocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin,
+ Runtime::Current()->GetHeap()->IsLowMemoryMode());
+}
+
+TEST_SPACE_CREATE_FN_RANDOM(RosAllocSpace, CreateRosAllocSpace)
+
+
+} // namespace space
+} // namespace gc
+} // namespace art
diff --git a/runtime/gc/space/rosalloc_space_static_test.cc b/runtime/gc/space/rosalloc_space_static_test.cc
new file mode 100644
index 0000000000..9f11fd0491
--- /dev/null
+++ b/runtime/gc/space/rosalloc_space_static_test.cc
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "space_test.h"
+
+namespace art {
+namespace gc {
+namespace space {
+
+MallocSpace* CreateRosAllocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
+ size_t capacity, byte* requested_begin) {
+ return RosAllocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin,
+ Runtime::Current()->GetHeap()->IsLowMemoryMode());
+}
+
+TEST_SPACE_CREATE_FN_STATIC(RosAllocSpace, CreateRosAllocSpace)
+
+
+} // namespace space
+} // namespace gc
+} // namespace art
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 0f8f38aa3c..37d7c80da1 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -399,6 +399,9 @@ class ContinuousMemMapAllocSpace : public MemMapSpace, public AllocSpace {
// Free all memory associated with this space.
virtual void Clear() = 0;
+ // Reset the space back to an empty space.
+ virtual void Reset() = 0;
+
accounting::SpaceBitmap* GetLiveBitmap() const {
return live_bitmap_.get();
}
diff --git a/runtime/gc/space/space_test.h b/runtime/gc/space/space_test.h
index cb036f8772..413fc1dcf9 100644
--- a/runtime/gc/space/space_test.h
+++ b/runtime/gc/space/space_test.h
@@ -539,43 +539,63 @@ void SpaceTest::SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size, Create
SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 3, capacity);
}
-#define TEST_SizeFootPrintGrowthLimitAndTrim(name, spaceName, spaceFn, size) \
- TEST_F(spaceName##Test, SizeFootPrintGrowthLimitAndTrim_AllocationsOf_##name) { \
+#define TEST_SizeFootPrintGrowthLimitAndTrimStatic(name, spaceName, spaceFn, size) \
+ TEST_F(spaceName##StaticTest, SizeFootPrintGrowthLimitAndTrim_AllocationsOf_##name) { \
SizeFootPrintGrowthLimitAndTrimDriver(size, spaceFn); \
- } \
- TEST_F(spaceName##Test, SizeFootPrintGrowthLimitAndTrim_RandomAllocationsWithMax_##name) { \
+ }
+
+#define TEST_SizeFootPrintGrowthLimitAndTrimRandom(name, spaceName, spaceFn, size) \
+ TEST_F(spaceName##RandomTest, SizeFootPrintGrowthLimitAndTrim_RandomAllocationsWithMax_##name) { \
SizeFootPrintGrowthLimitAndTrimDriver(-size, spaceFn); \
}
-#define TEST_SPACE_CREATE_FN(spaceName, spaceFn) \
- class spaceName##Test : public SpaceTest { \
+#define TEST_SPACE_CREATE_FN_BASE(spaceName, spaceFn) \
+ class spaceName##BaseTest : public SpaceTest { \
}; \
\
- TEST_F(spaceName##Test, Init) { \
+ TEST_F(spaceName##BaseTest, Init) { \
InitTestBody(spaceFn); \
} \
- TEST_F(spaceName##Test, ZygoteSpace) { \
+ TEST_F(spaceName##BaseTest, ZygoteSpace) { \
ZygoteSpaceTestBody(spaceFn); \
} \
- TEST_F(spaceName##Test, AllocAndFree) { \
+ TEST_F(spaceName##BaseTest, AllocAndFree) { \
AllocAndFreeTestBody(spaceFn); \
} \
- TEST_F(spaceName##Test, AllocAndFreeList) { \
+ TEST_F(spaceName##BaseTest, AllocAndFreeList) { \
AllocAndFreeListTestBody(spaceFn); \
- } \
- TEST_F(spaceName##Test, SizeFootPrintGrowthLimitAndTrim_AllocationsOf_12B) { \
- SizeFootPrintGrowthLimitAndTrimDriver(12, spaceFn); \
- } \
- TEST_SizeFootPrintGrowthLimitAndTrim(16B, spaceName, spaceFn, 16) \
- TEST_SizeFootPrintGrowthLimitAndTrim(24B, spaceName, spaceFn, 24) \
- TEST_SizeFootPrintGrowthLimitAndTrim(32B, spaceName, spaceFn, 32) \
- TEST_SizeFootPrintGrowthLimitAndTrim(64B, spaceName, spaceFn, 64) \
- TEST_SizeFootPrintGrowthLimitAndTrim(128B, spaceName, spaceFn, 128) \
- TEST_SizeFootPrintGrowthLimitAndTrim(1KB, spaceName, spaceFn, 1 * KB) \
- TEST_SizeFootPrintGrowthLimitAndTrim(4KB, spaceName, spaceFn, 4 * KB) \
- TEST_SizeFootPrintGrowthLimitAndTrim(1MB, spaceName, spaceFn, 1 * MB) \
- TEST_SizeFootPrintGrowthLimitAndTrim(4MB, spaceName, spaceFn, 4 * MB) \
- TEST_SizeFootPrintGrowthLimitAndTrim(8MB, spaceName, spaceFn, 8 * MB)
+ }
+
+#define TEST_SPACE_CREATE_FN_STATIC(spaceName, spaceFn) \
+ class spaceName##StaticTest : public SpaceTest { \
+ }; \
+ \
+ TEST_SizeFootPrintGrowthLimitAndTrimStatic(12B, spaceName, spaceFn, 12) \
+ TEST_SizeFootPrintGrowthLimitAndTrimStatic(16B, spaceName, spaceFn, 16) \
+ TEST_SizeFootPrintGrowthLimitAndTrimStatic(24B, spaceName, spaceFn, 24) \
+ TEST_SizeFootPrintGrowthLimitAndTrimStatic(32B, spaceName, spaceFn, 32) \
+ TEST_SizeFootPrintGrowthLimitAndTrimStatic(64B, spaceName, spaceFn, 64) \
+ TEST_SizeFootPrintGrowthLimitAndTrimStatic(128B, spaceName, spaceFn, 128) \
+ TEST_SizeFootPrintGrowthLimitAndTrimStatic(1KB, spaceName, spaceFn, 1 * KB) \
+ TEST_SizeFootPrintGrowthLimitAndTrimStatic(4KB, spaceName, spaceFn, 4 * KB) \
+ TEST_SizeFootPrintGrowthLimitAndTrimStatic(1MB, spaceName, spaceFn, 1 * MB) \
+ TEST_SizeFootPrintGrowthLimitAndTrimStatic(4MB, spaceName, spaceFn, 4 * MB) \
+ TEST_SizeFootPrintGrowthLimitAndTrimStatic(8MB, spaceName, spaceFn, 8 * MB)
+
+#define TEST_SPACE_CREATE_FN_RANDOM(spaceName, spaceFn) \
+ class spaceName##RandomTest : public SpaceTest { \
+ }; \
+ \
+ TEST_SizeFootPrintGrowthLimitAndTrimRandom(16B, spaceName, spaceFn, 16) \
+ TEST_SizeFootPrintGrowthLimitAndTrimRandom(24B, spaceName, spaceFn, 24) \
+ TEST_SizeFootPrintGrowthLimitAndTrimRandom(32B, spaceName, spaceFn, 32) \
+ TEST_SizeFootPrintGrowthLimitAndTrimRandom(64B, spaceName, spaceFn, 64) \
+ TEST_SizeFootPrintGrowthLimitAndTrimRandom(128B, spaceName, spaceFn, 128) \
+ TEST_SizeFootPrintGrowthLimitAndTrimRandom(1KB, spaceName, spaceFn, 1 * KB) \
+ TEST_SizeFootPrintGrowthLimitAndTrimRandom(4KB, spaceName, spaceFn, 4 * KB) \
+ TEST_SizeFootPrintGrowthLimitAndTrimRandom(1MB, spaceName, spaceFn, 1 * MB) \
+ TEST_SizeFootPrintGrowthLimitAndTrimRandom(4MB, spaceName, spaceFn, 4 * MB) \
+ TEST_SizeFootPrintGrowthLimitAndTrimRandom(8MB, spaceName, spaceFn, 8 * MB)
} // namespace space
} // namespace gc
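
With the macros split this way, the slow size/footprint tests can be sharded into separate gtest binaries while the cheap ones stay in the *_base_test file. For a single size, TEST_SPACE_CREATE_FN_STATIC(DlMallocSpace, CreateDlMallocSpace) expands roughly to:

class DlMallocSpaceStaticTest : public SpaceTest {};

TEST_F(DlMallocSpaceStaticTest, SizeFootPrintGrowthLimitAndTrim_AllocationsOf_12B) {
  SizeFootPrintGrowthLimitAndTrimDriver(12, CreateDlMallocSpace);
}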
diff --git a/runtime/gc/space/zygote_space.cc b/runtime/gc/space/zygote_space.cc
index a60ab38b10..d1c3d03758 100644
--- a/runtime/gc/space/zygote_space.cc
+++ b/runtime/gc/space/zygote_space.cc
@@ -61,6 +61,10 @@ void ZygoteSpace::Clear() {
LOG(FATAL) << "Unimplemented";
}
+void ZygoteSpace::Reset() {
+ LOG(FATAL) << "Unimplemented";
+}
+
ZygoteSpace::ZygoteSpace(const std::string& name, MemMap* mem_map, size_t objects_allocated)
: ContinuousMemMapAllocSpace(name, mem_map, mem_map->Begin(), mem_map->End(), mem_map->End(),
kGcRetentionPolicyFullCollect),
diff --git a/runtime/gc/space/zygote_space.h b/runtime/gc/space/zygote_space.h
index 8cd1a9f488..8880548e3e 100644
--- a/runtime/gc/space/zygote_space.h
+++ b/runtime/gc/space/zygote_space.h
@@ -71,7 +71,8 @@ class ZygoteSpace FINAL : public ContinuousMemMapAllocSpace {
return objects_allocated_;
}
- void Clear();
+ void Clear() OVERRIDE;
+ void Reset() OVERRIDE;
protected:
virtual accounting::SpaceBitmap::SweepCallback* GetSweepCallback() {
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 9d051694b9..e10d881a09 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -23,6 +23,7 @@
#include "class_linker.h"
#include "debugger.h"
#include "dex_file-inl.h"
+#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "interpreter/interpreter.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
@@ -41,8 +42,6 @@
namespace art {
-extern void SetQuickAllocEntryPointsInstrumented(bool instrumented);
-
namespace instrumentation {
const bool kVerboseInstrumentation = false;
@@ -80,9 +79,19 @@ static void UpdateEntrypoints(mirror::ArtMethod* method, const void* quick_code,
method->ClearIsPortableCompiled();
}
if (!method->IsResolutionMethod()) {
- if (quick_code == GetQuickToInterpreterBridge()) {
- DCHECK(portable_code == GetPortableToInterpreterBridge());
+ if (quick_code == GetQuickToInterpreterBridge() ||
+ (quick_code == GetQuickResolutionTrampoline(Runtime::Current()->GetClassLinker()) &&
+ Runtime::Current()->GetInstrumentation()->IsForcedInterpretOnly()
+ && !method->IsNative() && !method->IsProxyMethod())) {
+ if (kIsDebugBuild) {
+ if (quick_code == GetQuickToInterpreterBridge()) {
+ DCHECK(portable_code == GetPortableToInterpreterBridge());
+ } else if (quick_code == GetQuickResolutionTrampoline(Runtime::Current()->GetClassLinker())) {
+ DCHECK(portable_code == GetPortableResolutionTrampoline(Runtime::Current()->GetClassLinker()));
+ }
+ }
DCHECK(!method->IsNative()) << PrettyMethod(method);
+ DCHECK(!method->IsProxyMethod()) << PrettyMethod(method);
method->SetEntryPointFromInterpreter(art::interpreter::artInterpreterToInterpreterBridge);
} else {
method->SetEntryPointFromInterpreter(art::artInterpreterToCompiledCodeBridge);
@@ -456,10 +465,13 @@ void Instrumentation::InstrumentQuickAllocEntryPoints() {
quick_alloc_entry_points_instrumentation_counter_.FetchAndAdd(1) == 0;
if (enable_instrumentation) {
// Instrumentation wasn't enabled so enable it.
- SetQuickAllocEntryPointsInstrumented(true);
ThreadList* tl = Runtime::Current()->GetThreadList();
tl->SuspendAll();
- ResetQuickAllocEntryPoints();
+ {
+ MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
+ SetQuickAllocEntryPointsInstrumented(true);
+ ResetQuickAllocEntryPoints();
+ }
tl->ResumeAll();
}
}
@@ -471,10 +483,13 @@ void Instrumentation::UninstrumentQuickAllocEntryPoints() {
const bool disable_instrumentation =
quick_alloc_entry_points_instrumentation_counter_.FetchAndSub(1) == 1;
if (disable_instrumentation) {
- SetQuickAllocEntryPointsInstrumented(false);
ThreadList* tl = Runtime::Current()->GetThreadList();
tl->SuspendAll();
- ResetQuickAllocEntryPoints();
+ {
+ MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
+ SetQuickAllocEntryPointsInstrumented(false);
+ ResetQuickAllocEntryPoints();
+ }
tl->ResumeAll();
}
}
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 1ce72bd6a6..d7a0b4d3a7 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -167,9 +167,11 @@ class Instrumentation {
return interpreter_handler_table_;
}
- void InstrumentQuickAllocEntryPoints() LOCKS_EXCLUDED(Locks::thread_list_lock_);
- void UninstrumentQuickAllocEntryPoints() LOCKS_EXCLUDED(Locks::thread_list_lock_);
- void ResetQuickAllocEntryPoints();
+ void InstrumentQuickAllocEntryPoints() LOCKS_EXCLUDED(Locks::thread_list_lock_,
+ Locks::runtime_shutdown_lock_);
+ void UninstrumentQuickAllocEntryPoints() LOCKS_EXCLUDED(Locks::thread_list_lock_,
+ Locks::runtime_shutdown_lock_);
+ void ResetQuickAllocEntryPoints() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);
// Update the code of a method respecting any installed stubs.
void UpdateMethodsCode(mirror::ArtMethod* method, const void* quick_code,
@@ -192,6 +194,10 @@ class Instrumentation {
return interpret_only_;
}
+ bool IsForcedInterpretOnly() const {
+ return forced_interpret_only_;
+ }
+
bool ShouldPortableCodeDeoptimize() const {
return instrumentation_stubs_installed_;
}
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 40d4ea37d9..abe7fe1343 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -286,11 +286,39 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
}
enum InterpreterImplKind {
- kSwitchImpl, // switch-based interpreter implementation.
- kComputedGotoImplKind // computed-goto-based interpreter implementation.
+ kSwitchImpl, // Switch-based interpreter implementation.
+ kComputedGotoImplKind // Computed-goto-based interpreter implementation.
};
-static const InterpreterImplKind kInterpreterImplKind = kComputedGotoImplKind;
+#if !defined(__clang__)
+static constexpr InterpreterImplKind kInterpreterImplKind = kComputedGotoImplKind;
+#else
+// Clang 3.4 fails to build the goto interpreter implementation.
+static constexpr InterpreterImplKind kInterpreterImplKind = kSwitchImpl;
+template<bool do_access_check, bool transaction_active>
+JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem* code_item,
+ ShadowFrame& shadow_frame, JValue result_register) {
+ LOG(FATAL) << "UNREACHABLE";
+ exit(0);
+}
+// Explicit instantiations of ExecuteGotoImpl.
+template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+JValue ExecuteGotoImpl<true, false>(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame& shadow_frame, JValue result_register);
+template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+JValue ExecuteGotoImpl<false, false>(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame& shadow_frame, JValue result_register);
+template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+JValue ExecuteGotoImpl<true, true>(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame& shadow_frame, JValue result_register);
+template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+JValue ExecuteGotoImpl<false, true>(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame& shadow_frame, JValue result_register);
+#endif
static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame, JValue result_register)
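
The clang branch above stubs out ExecuteGotoImpl and then explicitly instantiates its four <do_access_check, transaction_active> combinations so existing call sites still link. The mechanism in isolation:

#include <cstdio>

template <bool kFlag>
int Describe() {
  return kFlag ? 1 : 0;
}

// Explicit instantiation definitions: force both specializations to be
// emitted in this translation unit, just as the patch does for
// ExecuteGotoImpl<true/false, true/false>.
template int Describe<true>();
template int Describe<false>();

int main() {
  std::printf("%d %d\n", Describe<true>(), Describe<false>());
  return 0;
}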
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index f76d50c873..e8cea9d833 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -155,6 +155,11 @@ bool DoCall(ArtMethod* method, Thread* self, ShadowFrame& shadow_frame,
if (kIsDebugBuild && method->GetEntryPointFromInterpreter() == nullptr) {
LOG(FATAL) << "Attempt to invoke non-executable method: " << PrettyMethod(method);
}
+ if (kIsDebugBuild && Runtime::Current()->GetInstrumentation()->IsForcedInterpretOnly() &&
+ !method->IsNative() && !method->IsProxyMethod() &&
+ method->GetEntryPointFromInterpreter() == artInterpreterToCompiledCodeBridge) {
+ LOG(FATAL) << "Attempt to call compiled code when -Xint: " << PrettyMethod(method);
+ }
(method->GetEntryPointFromInterpreter())(self, mh, code_item, new_shadow_frame, result);
} else {
UnstartedRuntimeInvoke(self, mh, code_item, new_shadow_frame, result, first_dest_reg);
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index a03e420514..589e0b042a 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -515,11 +515,11 @@ static inline uint32_t FindNextInstructionFollowingException(Thread* self,
return found_dex_pc;
}
-static void UnexpectedOpcode(const Instruction* inst, MethodHelper& mh)
- __attribute__((cold, noreturn, noinline));
+static inline void UnexpectedOpcode(const Instruction* inst, MethodHelper& mh)
+ __attribute__((cold, noreturn))
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-static void UnexpectedOpcode(const Instruction* inst, MethodHelper& mh)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+static inline void UnexpectedOpcode(const Instruction* inst, MethodHelper& mh) {
LOG(FATAL) << "Unexpected instruction: " << inst->DumpString(&mh.GetDexFile());
exit(0); // Unreachable, keep GCC happy.
}
diff --git a/runtime/jdwp/jdwp.h b/runtime/jdwp/jdwp.h
index 1ec795f103..fdbdfeb3b1 100644
--- a/runtime/jdwp/jdwp.h
+++ b/runtime/jdwp/jdwp.h
@@ -96,9 +96,9 @@ struct JdwpOptions {
};
struct JdwpEvent;
-struct JdwpNetStateBase;
+class JdwpNetStateBase;
struct ModBasket;
-struct Request;
+class Request;
/*
* State for JDWP functions.
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 2db0f5ffc9..1bcb8dd3bc 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -40,6 +40,7 @@
#include "mirror/object_array-inl.h"
#include "mirror/throwable.h"
#include "object_utils.h"
+#include "parsed_options.h"
#include "runtime.h"
#include "safe_map.h"
#include "scoped_thread_state_change.h"
@@ -104,7 +105,8 @@ static void CheckMethodArguments(mirror::ArtMethod* m, uint32_t* args)
mirror::Object* argument = reinterpret_cast<mirror::Object*>(args[i + offset]);
if (argument != nullptr && !argument->InstanceOf(param_type)) {
LOG(ERROR) << "JNI ERROR (app bug): attempt to pass an instance of "
- << PrettyTypeOf(argument) << " as argument " << (i + 1) << " to " << PrettyMethod(m);
+ << PrettyTypeOf(argument) << " as argument " << (i + 1)
+ << " to " << PrettyMethod(m);
++error_count;
}
} else if (param_type->IsPrimitiveLong() || param_type->IsPrimitiveDouble()) {
@@ -114,7 +116,8 @@ static void CheckMethodArguments(mirror::ArtMethod* m, uint32_t* args)
if (error_count > 0) {
// TODO: pass the JNI function name (such as "CallVoidMethodV") through so we can call JniAbort
// with an argument.
- JniAbortF(nullptr, "bad arguments passed to %s (see above for details)", PrettyMethod(m).c_str());
+ JniAbortF(nullptr, "bad arguments passed to %s (see above for details)",
+ PrettyMethod(m).c_str());
}
}
@@ -294,8 +297,8 @@ static jfieldID FindFieldID(const ScopedObjectAccess& soa, jclass jni_class, con
SirtRef<mirror::Throwable> cause(soa.Self(), soa.Self()->GetException(&throw_location));
soa.Self()->ClearException();
soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/NoSuchFieldError;",
- "no type \"%s\" found and so no field \"%s\" could be found in class "
- "\"%s\" or its superclasses", sig, name,
+ "no type \"%s\" found and so no field \"%s\" "
+ "could be found in class \"%s\" or its superclasses", sig, name,
ClassHelper(c.get()).GetDescriptor());
soa.Self()->GetException(nullptr)->SetCause(cause.get());
return nullptr;
@@ -782,7 +785,8 @@ class JNI {
old_throw_dex_pc = old_throw_location.GetDexPc();
soa.Self()->ClearException();
}
- ScopedLocalRef<jthrowable> exception(env, soa.AddLocalReference<jthrowable>(old_exception.get()));
+ ScopedLocalRef<jthrowable> exception(env,
+ soa.AddLocalReference<jthrowable>(old_exception.get()));
ScopedLocalRef<jclass> exception_class(env, env->GetObjectClass(exception.get()));
jmethodID mid = env->GetMethodID(exception_class.get(), "printStackTrace", "()V");
if (mid == nullptr) {
@@ -905,7 +909,8 @@ class JNI {
return JNI_TRUE;
} else {
ScopedObjectAccess soa(env);
- return (soa.Decode<mirror::Object*>(obj1) == soa.Decode<mirror::Object*>(obj2)) ? JNI_TRUE : JNI_FALSE;
+ return (soa.Decode<mirror::Object*>(obj1) == soa.Decode<mirror::Object*>(obj2))
+ ? JNI_TRUE : JNI_FALSE;
}
}
@@ -2334,7 +2339,8 @@ class JNI {
static void SetBooleanArrayRegion(JNIEnv* env, jbooleanArray array, jsize start, jsize length,
const jboolean* buf) {
ScopedObjectAccess soa(env);
- SetPrimitiveArrayRegion<jbooleanArray, jboolean, mirror::BooleanArray>(soa, array, start, length, buf);
+ SetPrimitiveArrayRegion<jbooleanArray, jboolean, mirror::BooleanArray>(soa, array, start,
+ length, buf);
}
static void SetByteArrayRegion(JNIEnv* env, jbyteArray array, jsize start, jsize length,
@@ -2352,13 +2358,15 @@ class JNI {
static void SetDoubleArrayRegion(JNIEnv* env, jdoubleArray array, jsize start, jsize length,
const jdouble* buf) {
ScopedObjectAccess soa(env);
- SetPrimitiveArrayRegion<jdoubleArray, jdouble, mirror::DoubleArray>(soa, array, start, length, buf);
+ SetPrimitiveArrayRegion<jdoubleArray, jdouble, mirror::DoubleArray>(soa, array, start, length,
+ buf);
}
static void SetFloatArrayRegion(JNIEnv* env, jfloatArray array, jsize start, jsize length,
const jfloat* buf) {
ScopedObjectAccess soa(env);
- SetPrimitiveArrayRegion<jfloatArray, jfloat, mirror::FloatArray>(soa, array, start, length, buf);
+ SetPrimitiveArrayRegion<jfloatArray, jfloat, mirror::FloatArray>(soa, array, start, length,
+ buf);
}
static void SetIntArrayRegion(JNIEnv* env, jintArray array, jsize start, jsize length,
@@ -2556,7 +2564,8 @@ class JNI {
ScopedObjectAccess soa(env);
if (soa.Decode<mirror::Object*>(java_object) ==
reinterpret_cast<mirror::Object*>(java_object)) {
- if (soa.Env()->locals.ContainsDirectPointer(reinterpret_cast<mirror::Object*>(java_object))) {
+ mirror::Object* object = reinterpret_cast<mirror::Object*>(java_object);
+ if (soa.Env()->locals.ContainsDirectPointer(object)) {
return JNILocalRefType;
}
}
@@ -3086,7 +3095,7 @@ const JNIInvokeInterface gJniInvokeInterface = {
JII::AttachCurrentThreadAsDaemon
};
-JavaVMExt::JavaVMExt(Runtime* runtime, Runtime::ParsedOptions* options)
+JavaVMExt::JavaVMExt(Runtime* runtime, ParsedOptions* options)
: runtime(runtime),
check_jni_abort_hook(nullptr),
check_jni_abort_hook_data(nullptr),
diff --git a/runtime/jni_internal.h b/runtime/jni_internal.h
index 9e109875cd..606d5d1311 100644
--- a/runtime/jni_internal.h
+++ b/runtime/jni_internal.h
@@ -46,6 +46,7 @@ namespace mirror {
class ArgArray;
union JValue;
class Libraries;
+class ParsedOptions;
class ScopedObjectAccess;
class Thread;
@@ -64,7 +65,7 @@ int ThrowNewException(JNIEnv* env, jclass exception_class, const char* msg, jobj
class JavaVMExt : public JavaVM {
public:
- JavaVMExt(Runtime* runtime, Runtime::ParsedOptions* options);
+ JavaVMExt(Runtime* runtime, ParsedOptions* options);
~JavaVMExt();
/**
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index e82c39389d..e3f4eedbe0 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -49,8 +49,9 @@ inline ClassLoader* Class::GetClassLoader() {
return GetFieldObject<ClassLoader>(OFFSET_OF_OBJECT_MEMBER(Class, class_loader_), false);
}
+template<VerifyObjectFlags kVerifyFlags>
inline DexCache* Class::GetDexCache() {
- return GetFieldObject<DexCache>(OFFSET_OF_OBJECT_MEMBER(Class, dex_cache_), false);
+ return GetFieldObject<DexCache, kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, dex_cache_), false);
}
inline ObjectArray<ArtMethod>* Class::GetDirectMethods() {
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 43db996642..f9a5ea2b91 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -276,11 +276,13 @@ class MANAGED Class : public Object {
// Computes the name, then sets the cached value.
String* ComputeName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsProxyClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Read access flags without using the getter, as whether something is a proxy can be checked in
// any loaded state.
// TODO: switch to a check if the super class is java.lang.reflect.Proxy?
- uint32_t access_flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_), false);
+ uint32_t access_flags = GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_),
+ false);
return (access_flags & kAccClassIsProxy) != 0;
}
@@ -567,6 +569,7 @@ class MANAGED Class : public Object {
void DumpClass(std::ostream& os, int flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void SetDexCache(DexCache* new_dex_cache) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 478cc36fb6..484c21a02f 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -323,9 +323,9 @@ inline size_t Object::SizeOf() {
size_t result;
constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
if (IsArrayInstance<kVerifyFlags>()) {
- result = AsArray<kNewFlags>()->SizeOf<>();
+ result = AsArray<kNewFlags>()->template SizeOf<kNewFlags>();
} else if (IsClass<kNewFlags>()) {
- result = AsClass<kNewFlags>()->SizeOf<kNewFlags>();
+ result = AsClass<kNewFlags>()->template SizeOf<kNewFlags>();
} else {
result = GetClass<kNewFlags>()->GetObjectSize();
}
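
The `->template SizeOf<kNewFlags>()` spelling above is required C++ syntax rather than style: when the object expression depends on a template parameter and the member is called with explicit template arguments, the call must be disambiguated with the `template` keyword. A minimal standalone sketch of the same rule (names here are hypothetical, not ART code):

#include <cassert>

struct Widget {
  template <int N>
  int Size() const { return N; }
};

template <typename T>
int CallSize(const T* t) {
  // Without `template`, `t->Size<3>()` would parse as `(t->Size < 3) > ()`.
  return t->template Size<3>();
}

int main() {
  Widget w;
  assert(CallSize(&w) == 3);  // The disambiguated call compiles and returns 3.
  return 0;
}
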
@@ -485,7 +485,6 @@ inline void Object::SetFieldObjectWithoutWriteBarrier(MemberOffset field_offset,
if (kVerifyFlags & kVerifyWrites) {
VerifyObject(new_value);
}
- HeapReference<Object> objref(HeapReference<Object>::FromMirrorPtr(new_value));
byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
HeapReference<Object>* objref_addr = reinterpret_cast<HeapReference<Object>*>(raw_addr);
if (UNLIKELY(is_volatile)) {
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 3c703ba476..d9bb121025 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -56,13 +56,17 @@ static void VMRuntime_disableJitCompilation(JNIEnv*, jobject) {
static jobject VMRuntime_newNonMovableArray(JNIEnv* env, jobject, jclass javaElementClass,
jint length) {
ScopedFastNativeObjectAccess soa(env);
+ if (UNLIKELY(length < 0)) {
+ ThrowNegativeArraySizeException(length);
+ return nullptr;
+ }
mirror::Class* element_class = soa.Decode<mirror::Class*>(javaElementClass);
if (UNLIKELY(element_class == nullptr)) {
ThrowNullPointerException(NULL, "element class == null");
return nullptr;
}
- if (UNLIKELY(length < 0)) {
- ThrowNegativeArraySizeException(length);
+ if (UNLIKELY(element_class->IsPrimitiveVoid())) {
+ ThrowIllegalArgumentException(NULL, "Can't allocate an array of void");
return nullptr;
}
Runtime* runtime = Runtime::Current();
@@ -76,6 +80,34 @@ static jobject VMRuntime_newNonMovableArray(JNIEnv* env, jobject, jclass javaEle
return soa.AddLocalReference<jobject>(result);
}
+static jobject VMRuntime_newUnpaddedArray(JNIEnv* env, jobject, jclass javaElementClass,
+ jint length) {
+ ScopedFastNativeObjectAccess soa(env);
+ if (UNLIKELY(length < 0)) {
+ ThrowNegativeArraySizeException(length);
+ return nullptr;
+ }
+ mirror::Class* element_class = soa.Decode<mirror::Class*>(javaElementClass);
+ if (UNLIKELY(element_class == nullptr)) {
+ ThrowNullPointerException(NULL, "element class == null");
+ return nullptr;
+ }
+ if (UNLIKELY(element_class->IsPrimitiveVoid())) {
+ ThrowIllegalArgumentException(NULL, "Can't allocate an array of void");
+ return nullptr;
+ }
+ Runtime* runtime = Runtime::Current();
+ mirror::Class* array_class = runtime->GetClassLinker()->FindArrayClass(soa.Self(), element_class);
+ if (UNLIKELY(array_class == nullptr)) {
+ return nullptr;
+ }
+ gc::AllocatorType allocator = runtime->GetHeap()->GetCurrentAllocator();
+ mirror::Array* result = mirror::Array::Alloc<true>(soa.Self(), array_class, length,
+ array_class->GetComponentSize(), allocator,
+ true);
+ return soa.AddLocalReference<jobject>(result);
+}
+
static jlong VMRuntime_addressOf(JNIEnv* env, jobject, jobject javaArray) {
if (javaArray == NULL) { // Most likely allocation failed
return 0;
@@ -123,7 +155,7 @@ static jstring VMRuntime_classPath(JNIEnv* env, jobject) {
}
static jstring VMRuntime_vmVersion(JNIEnv* env, jobject) {
- return env->NewStringUTF(Runtime::Current()->GetVersion());
+ return env->NewStringUTF(Runtime::GetVersion());
}
static jstring VMRuntime_vmLibrary(JNIEnv* env, jobject) {
@@ -497,6 +529,7 @@ static JNINativeMethod gMethods[] = {
NATIVE_METHOD(VMRuntime, isDebuggerActive, "()Z"),
NATIVE_METHOD(VMRuntime, nativeSetTargetHeapUtilization, "(F)V"),
NATIVE_METHOD(VMRuntime, newNonMovableArray, "!(Ljava/lang/Class;I)Ljava/lang/Object;"),
+ NATIVE_METHOD(VMRuntime, newUnpaddedArray, "!(Ljava/lang/Class;I)Ljava/lang/Object;"),
NATIVE_METHOD(VMRuntime, properties, "()[Ljava/lang/String;"),
NATIVE_METHOD(VMRuntime, setTargetSdkVersionNative, "(I)V"),
NATIVE_METHOD(VMRuntime, registerNativeAllocation, "(I)V"),
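
Both array allocators above now run the same argument checks in the same order: a negative length throws NegativeArraySizeException before a null element class throws NullPointerException, and a void element class is rejected last with IllegalArgumentException. A standalone sketch of that ordering (hypothetical types, not ART code):

#include <cassert>

enum class ArrayError { kNone, kNegativeSize, kNullClass, kVoidClass };

// Mirrors the check order in newNonMovableArray/newUnpaddedArray.
ArrayError CheckAllocArgs(int length, const void* element_class, bool is_void) {
  if (length < 0) return ArrayError::kNegativeSize;
  if (element_class == nullptr) return ArrayError::kNullClass;
  if (is_void) return ArrayError::kVoidClass;
  return ArrayError::kNone;
}

int main() {
  int dummy = 0;
  assert(CheckAllocArgs(-1, nullptr, false) == ArrayError::kNegativeSize);  // Length wins over null class.
  assert(CheckAllocArgs(4, nullptr, false) == ArrayError::kNullClass);
  assert(CheckAllocArgs(4, &dummy, true) == ArrayError::kVoidClass);
  assert(CheckAllocArgs(4, &dummy, false) == ArrayError::kNone);
  return 0;
}
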
diff --git a/runtime/oat.cc b/runtime/oat.cc
index 945cd77703..c8eb3e27ae 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -22,7 +22,7 @@
namespace art {
const uint8_t OatHeader::kOatMagic[] = { 'o', 'a', 't', '\n' };
-const uint8_t OatHeader::kOatVersion[] = { '0', '1', '5', '\0' };
+const uint8_t OatHeader::kOatVersion[] = { '0', '1', '6', '\0' };
OatHeader::OatHeader() {
memset(this, 0, sizeof(*this));
@@ -67,6 +67,7 @@ OatHeader::OatHeader(InstructionSet instruction_set,
portable_imt_conflict_trampoline_offset_ = 0;
portable_resolution_trampoline_offset_ = 0;
portable_to_interpreter_bridge_offset_ = 0;
+ quick_generic_jni_trampoline_offset_ = 0;
quick_imt_conflict_trampoline_offset_ = 0;
quick_resolution_trampoline_offset_ = 0;
quick_to_interpreter_bridge_offset_ = 0;
@@ -239,18 +240,37 @@ void OatHeader::SetPortableToInterpreterBridgeOffset(uint32_t offset) {
UpdateChecksum(&portable_to_interpreter_bridge_offset_, sizeof(offset));
}
+const void* OatHeader::GetQuickGenericJniTrampoline() const {
+ return reinterpret_cast<const uint8_t*>(this) + GetQuickGenericJniTrampolineOffset();
+}
+
+uint32_t OatHeader::GetQuickGenericJniTrampolineOffset() const {
+ DCHECK(IsValid());
+ CHECK_GE(quick_generic_jni_trampoline_offset_, portable_to_interpreter_bridge_offset_);
+ return quick_generic_jni_trampoline_offset_;
+}
+
+void OatHeader::SetQuickGenericJniTrampolineOffset(uint32_t offset) {
+ CHECK(offset == 0 || offset >= portable_to_interpreter_bridge_offset_);
+ DCHECK(IsValid());
+ DCHECK_EQ(quick_generic_jni_trampoline_offset_, 0U) << offset;
+
+ quick_generic_jni_trampoline_offset_ = offset;
+ UpdateChecksum(&quick_generic_jni_trampoline_offset_, sizeof(offset));
+}
+
const void* OatHeader::GetQuickImtConflictTrampoline() const {
return reinterpret_cast<const uint8_t*>(this) + GetQuickImtConflictTrampolineOffset();
}
uint32_t OatHeader::GetQuickImtConflictTrampolineOffset() const {
DCHECK(IsValid());
- CHECK_GE(quick_imt_conflict_trampoline_offset_, portable_to_interpreter_bridge_offset_);
+ CHECK_GE(quick_imt_conflict_trampoline_offset_, quick_generic_jni_trampoline_offset_);
return quick_imt_conflict_trampoline_offset_;
}
void OatHeader::SetQuickImtConflictTrampolineOffset(uint32_t offset) {
- CHECK(offset == 0 || offset >= portable_to_interpreter_bridge_offset_);
+ CHECK(offset == 0 || offset >= quick_generic_jni_trampoline_offset_);
DCHECK(IsValid());
DCHECK_EQ(quick_imt_conflict_trampoline_offset_, 0U) << offset;
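
The CHECK_GE chain keeps the trampoline offsets non-decreasing in field-declaration order, so inserting quick_generic_jni_trampoline_offset_ between the portable bridge and the IMT-conflict trampoline also means re-pointing the IMT-conflict checks at the new field. A standalone sketch of the invariant (offset values hypothetical):

#include <cassert>
#include <cstdint>

int main() {
  uint32_t portable_to_interpreter_bridge = 0x100;
  uint32_t quick_generic_jni_trampoline = 0x180;   // New field: must not precede the bridge.
  uint32_t quick_imt_conflict_trampoline = 0x200;  // Now checked against the generic JNI slot.
  assert(quick_generic_jni_trampoline >= portable_to_interpreter_bridge);
  assert(quick_imt_conflict_trampoline >= quick_generic_jni_trampoline);
  return 0;
}
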
diff --git a/runtime/oat.h b/runtime/oat.h
index de840b5870..2851f5c14d 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -70,6 +70,9 @@ class PACKED(4) OatHeader {
uint32_t GetPortableToInterpreterBridgeOffset() const;
void SetPortableToInterpreterBridgeOffset(uint32_t offset);
+ const void* GetQuickGenericJniTrampoline() const;
+ uint32_t GetQuickGenericJniTrampolineOffset() const;
+ void SetQuickGenericJniTrampolineOffset(uint32_t offset);
const void* GetQuickResolutionTrampoline() const;
uint32_t GetQuickResolutionTrampolineOffset() const;
void SetQuickResolutionTrampolineOffset(uint32_t offset);
@@ -103,6 +106,7 @@ class PACKED(4) OatHeader {
uint32_t portable_imt_conflict_trampoline_offset_;
uint32_t portable_resolution_trampoline_offset_;
uint32_t portable_to_interpreter_bridge_offset_;
+ uint32_t quick_generic_jni_trampoline_offset_;
uint32_t quick_imt_conflict_trampoline_offset_;
uint32_t quick_resolution_trampoline_offset_;
uint32_t quick_to_interpreter_bridge_offset_;
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
new file mode 100644
index 0000000000..c5b1c4b558
--- /dev/null
+++ b/runtime/parsed_options.cc
@@ -0,0 +1,739 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "parsed_options.h"
+
+#include "debugger.h"
+#include "monitor.h"
+
+namespace art {
+
+ParsedOptions* ParsedOptions::Create(const Runtime::Options& options, bool ignore_unrecognized) {
+ UniquePtr<ParsedOptions> parsed(new ParsedOptions());
+ if (parsed->Parse(options, ignore_unrecognized)) {
+ return parsed.release();
+ }
+ return nullptr;
+}
+
+// Parse a string of the form /[0-9]+[kKmMgG]?/, which is used to specify
+// memory sizes. [kK] indicates kilobytes, [mM] megabytes, and
+// [gG] gigabytes.
+//
+// "s" should point just past the "-Xm?" part of the string.
+// "div" specifies a divisor, e.g. 1024 if the value must be a multiple
+// of 1024.
+//
+// The spec says the -Xmx and -Xms options must be multiples of 1024. It
+// doesn't say anything about -Xss.
+//
+// Returns 0 (a useless size) if "s" is malformed or specifies a low or
+// non-evenly-divisible value.
+//
+size_t ParseMemoryOption(const char* s, size_t div) {
+ // strtoul accepts a leading [+-], which we don't want,
+ // so make sure our string starts with a decimal digit.
+ if (isdigit(*s)) {
+ char* s2;
+ size_t val = strtoul(s, &s2, 10);
+ if (s2 != s) {
+ // s2 should be pointing just after the number.
+ // If this is the end of the string, the user
+ // has specified a number of bytes. Otherwise,
+ // there should be exactly one more character
+ // that specifies a multiplier.
+ if (*s2 != '\0') {
+ // The remainder of the string is either a single multiplier
+ // character, or nothing to indicate that the value is in
+ // bytes.
+ char c = *s2++;
+ if (*s2 == '\0') {
+ size_t mul;
+ if (c == '\0') {
+ mul = 1;
+ } else if (c == 'k' || c == 'K') {
+ mul = KB;
+ } else if (c == 'm' || c == 'M') {
+ mul = MB;
+ } else if (c == 'g' || c == 'G') {
+ mul = GB;
+ } else {
+ // Unknown multiplier character.
+ return 0;
+ }
+
+ if (val <= std::numeric_limits<size_t>::max() / mul) {
+ val *= mul;
+ } else {
+ // Clamp to a multiple of 1024.
+ val = std::numeric_limits<size_t>::max() & ~(1024-1);
+ }
+ } else {
+ // There's more than one character after the numeric part.
+ return 0;
+ }
+ }
+ // The man page says that a -Xm value must be a multiple of 1024.
+ if (val % div == 0) {
+ return val;
+ }
+ }
+ }
+ return 0;
+}
+
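
For illustration, a compilable standalone sketch of the behaviour documented above (not part of the patch; names hypothetical): a bare number means bytes, one trailing k/K, m/M or g/G scales it, anything else yields the 0 sentinel, and the result must divide evenly by div:

#include <cassert>
#include <cctype>
#include <cstddef>
#include <cstdlib>

size_t ParseMemSketch(const char* s, size_t div) {
  if (!isdigit(*s)) return 0;          // Reject the leading +/- that strtoul would accept.
  char* end;
  size_t val = strtoul(s, &end, 10);
  size_t mul = 1;
  if (*end != '\0') {
    char c = *end++;
    if (*end != '\0') return 0;        // More than one character after the number.
    if (c == 'k' || c == 'K') mul = size_t(1024);
    else if (c == 'm' || c == 'M') mul = size_t(1024) * 1024;
    else if (c == 'g' || c == 'G') mul = size_t(1024) * 1024 * 1024;
    else return 0;                     // Unknown multiplier character.
  }
  val *= mul;                          // Overflow clamping elided for brevity.
  return (val % div == 0) ? val : 0;   // E.g. -Xms/-Xmx must be multiples of 1024.
}

int main() {
  assert(ParseMemSketch("2048", 1024) == 2048u);             // Plain byte count.
  assert(ParseMemSketch("64m", 1024) == 64u * 1024 * 1024);  // 64 megabytes.
  assert(ParseMemSketch("100", 1024) == 0u);                 // Not a multiple of 1024.
  assert(ParseMemSketch("64mb", 1024) == 0u);                // Trailing cruft.
  return 0;
}
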
+static gc::CollectorType ParseCollectorType(const std::string& option) {
+ if (option == "MS" || option == "nonconcurrent") {
+ return gc::kCollectorTypeMS;
+ } else if (option == "CMS" || option == "concurrent") {
+ return gc::kCollectorTypeCMS;
+ } else if (option == "SS") {
+ return gc::kCollectorTypeSS;
+ } else if (option == "GSS") {
+ return gc::kCollectorTypeGSS;
+ } else {
+ return gc::kCollectorTypeNone;
+ }
+}
+
+bool ParsedOptions::Parse(const Runtime::Options& options, bool ignore_unrecognized) {
+ const char* boot_class_path_string = getenv("BOOTCLASSPATH");
+ if (boot_class_path_string != NULL) {
+ boot_class_path_string_ = boot_class_path_string;
+ }
+ const char* class_path_string = getenv("CLASSPATH");
+ if (class_path_string != NULL) {
+ class_path_string_ = class_path_string;
+ }
+ // -Xcheck:jni is off by default for regular builds but on by default in debug builds.
+ check_jni_ = kIsDebugBuild;
+
+ heap_initial_size_ = gc::Heap::kDefaultInitialSize;
+ heap_maximum_size_ = gc::Heap::kDefaultMaximumSize;
+ heap_min_free_ = gc::Heap::kDefaultMinFree;
+ heap_max_free_ = gc::Heap::kDefaultMaxFree;
+ heap_target_utilization_ = gc::Heap::kDefaultTargetUtilization;
+ heap_growth_limit_ = 0; // 0 means no growth limit.
+ // Default to number of processors minus one since the main GC thread also does work.
+ parallel_gc_threads_ = sysconf(_SC_NPROCESSORS_CONF) - 1;
+ // Only the main GC thread, no workers.
+ conc_gc_threads_ = 0;
+ // Default is CMS which is Sticky + Partial + Full CMS GC.
+ collector_type_ = gc::kCollectorTypeCMS;
+ // If background_collector_type_ is kCollectorTypeNone, it defaults to the collector_type_ after
+ // parsing options.
+ background_collector_type_ = gc::kCollectorTypeNone;
+ stack_size_ = 0; // 0 means default.
+ max_spins_before_thin_lock_inflation_ = Monitor::kDefaultMaxSpinsBeforeThinLockInflation;
+ low_memory_mode_ = false;
+ use_tlab_ = false;
+ verify_pre_gc_heap_ = false;
+ verify_post_gc_heap_ = kIsDebugBuild;
+ verify_pre_gc_rosalloc_ = kIsDebugBuild;
+ verify_post_gc_rosalloc_ = false;
+
+ compiler_callbacks_ = nullptr;
+ is_zygote_ = false;
+ interpreter_only_ = false;
+ is_explicit_gc_disabled_ = false;
+
+ long_pause_log_threshold_ = gc::Heap::kDefaultLongPauseLogThreshold;
+ long_gc_log_threshold_ = gc::Heap::kDefaultLongGCLogThreshold;
+ dump_gc_performance_on_shutdown_ = false;
+ ignore_max_footprint_ = false;
+
+ lock_profiling_threshold_ = 0;
+ hook_is_sensitive_thread_ = NULL;
+
+ hook_vfprintf_ = vfprintf;
+ hook_exit_ = exit;
+ hook_abort_ = NULL; // We don't call abort(3) by default; see Runtime::Abort.
+
+// gLogVerbosity.class_linker = true; // TODO: don't check this in!
+// gLogVerbosity.compiler = true; // TODO: don't check this in!
+// gLogVerbosity.verifier = true; // TODO: don't check this in!
+// gLogVerbosity.heap = true; // TODO: don't check this in!
+// gLogVerbosity.gc = true; // TODO: don't check this in!
+// gLogVerbosity.jdwp = true; // TODO: don't check this in!
+// gLogVerbosity.jni = true; // TODO: don't check this in!
+// gLogVerbosity.monitor = true; // TODO: don't check this in!
+// gLogVerbosity.startup = true; // TODO: don't check this in!
+// gLogVerbosity.third_party_jni = true; // TODO: don't check this in!
+// gLogVerbosity.threads = true; // TODO: don't check this in!
+
+ method_trace_ = false;
+ method_trace_file_ = "/data/method-trace-file.bin";
+ method_trace_file_size_ = 10 * MB;
+
+ profile_ = false;
+ profile_period_s_ = 10; // Seconds.
+ profile_duration_s_ = 20; // Seconds.
+ profile_interval_us_ = 500; // Microseconds.
+ profile_backoff_coefficient_ = 2.0;
+ profile_clock_source_ = kDefaultProfilerClockSource;
+
+ for (size_t i = 0; i < options.size(); ++i) {
+ const std::string option(options[i].first);
+ if (true && options[0].first == "-Xzygote") {
+ LOG(INFO) << "option[" << i << "]=" << option;
+ }
+ if (StartsWith(option, "-help")) {
+ Usage(nullptr);
+ return false;
+ } else if (StartsWith(option, "-showversion")) {
+ UsageMessage(stdout, "ART version %s\n", Runtime::GetVersion());
+ Exit(0);
+ } else if (StartsWith(option, "-Xbootclasspath:")) {
+ boot_class_path_string_ = option.substr(strlen("-Xbootclasspath:")).data();
+ } else if (option == "-classpath" || option == "-cp") {
+ // TODO: support -Djava.class.path
+ i++;
+ if (i == options.size()) {
+ Usage("Missing required class path value for %s", option.c_str());
+ return false;
+ }
+ const StringPiece& value = options[i].first;
+ class_path_string_ = value.data();
+ } else if (option == "bootclasspath") {
+ boot_class_path_
+ = reinterpret_cast<const std::vector<const DexFile*>*>(options[i].second);
+ } else if (StartsWith(option, "-Ximage:")) {
+ if (!ParseStringAfterChar(option, ':', &image_)) {
+ return false;
+ }
+ } else if (StartsWith(option, "-Xcheck:jni")) {
+ check_jni_ = true;
+ } else if (StartsWith(option, "-Xrunjdwp:") || StartsWith(option, "-agentlib:jdwp=")) {
+ std::string tail(option.substr(option[1] == 'X' ? 10 : 15));
+ // TODO: move parsing logic out of Dbg
+ if (tail == "help" || !Dbg::ParseJdwpOptions(tail)) {
+ if (tail != "help") {
+ UsageMessage(stderr, "Failed to parse JDWP option %s\n", tail.c_str());
+ }
+ Usage("Example: -Xrunjdwp:transport=dt_socket,address=8000,server=y\n"
+ "Example: -Xrunjdwp:transport=dt_socket,address=localhost:6500,server=n\n");
+ return false;
+ }
+ } else if (StartsWith(option, "-Xms")) {
+ size_t size = ParseMemoryOption(option.substr(strlen("-Xms")).c_str(), 1024);
+ if (size == 0) {
+ Usage("Failed to parse memory option %s", option.c_str());
+ return false;
+ }
+ heap_initial_size_ = size;
+ } else if (StartsWith(option, "-Xmx")) {
+ size_t size = ParseMemoryOption(option.substr(strlen("-Xmx")).c_str(), 1024);
+ if (size == 0) {
+ Usage("Failed to parse memory option %s", option.c_str());
+ return false;
+ }
+ heap_maximum_size_ = size;
+ } else if (StartsWith(option, "-XX:HeapGrowthLimit=")) {
+ size_t size = ParseMemoryOption(option.substr(strlen("-XX:HeapGrowthLimit=")).c_str(), 1024);
+ if (size == 0) {
+ Usage("Failed to parse memory option %s", option.c_str());
+ return false;
+ }
+ heap_growth_limit_ = size;
+ } else if (StartsWith(option, "-XX:HeapMinFree=")) {
+ size_t size = ParseMemoryOption(option.substr(strlen("-XX:HeapMinFree=")).c_str(), 1024);
+ if (size == 0) {
+ Usage("Failed to parse memory option %s", option.c_str());
+ return false;
+ }
+ heap_min_free_ = size;
+ } else if (StartsWith(option, "-XX:HeapMaxFree=")) {
+ size_t size = ParseMemoryOption(option.substr(strlen("-XX:HeapMaxFree=")).c_str(), 1024);
+ if (size == 0) {
+ Usage("Failed to parse memory option %s", option.c_str());
+ return false;
+ }
+ heap_max_free_ = size;
+ } else if (StartsWith(option, "-XX:HeapTargetUtilization=")) {
+ if (!ParseDouble(option, '=', 0.1, 0.9, &heap_target_utilization_)) {
+ return false;
+ }
+ } else if (StartsWith(option, "-XX:ParallelGCThreads=")) {
+ if (!ParseUnsignedInteger(option, '=', &parallel_gc_threads_)) {
+ return false;
+ }
+ } else if (StartsWith(option, "-XX:ConcGCThreads=")) {
+ if (!ParseUnsignedInteger(option, '=', &conc_gc_threads_)) {
+ return false;
+ }
+ } else if (StartsWith(option, "-Xss")) {
+ size_t size = ParseMemoryOption(option.substr(strlen("-Xss")).c_str(), 1);
+ if (size == 0) {
+ Usage("Failed to parse memory option %s", option.c_str());
+ return false;
+ }
+ stack_size_ = size;
+ } else if (StartsWith(option, "-XX:MaxSpinsBeforeThinLockInflation=")) {
+ if (!ParseUnsignedInteger(option, '=', &max_spins_before_thin_lock_inflation_)) {
+ return false;
+ }
+ } else if (StartsWith(option, "-XX:LongPauseLogThreshold=")) {
+ size_t value;
+ if (!ParseUnsignedInteger(option, '=', &value)) {
+ return false;
+ }
+ long_pause_log_threshold_ = MsToNs(value);
+ } else if (StartsWith(option, "-XX:LongGCLogThreshold=")) {
+ size_t value;
+ if (!ParseUnsignedInteger(option, '=', &value)) {
+ return false;
+ }
+ long_gc_log_threshold_ = MsToNs(value);
+ } else if (option == "-XX:DumpGCPerformanceOnShutdown") {
+ dump_gc_performance_on_shutdown_ = true;
+ } else if (option == "-XX:IgnoreMaxFootprint") {
+ ignore_max_footprint_ = true;
+ } else if (option == "-XX:LowMemoryMode") {
+ low_memory_mode_ = true;
+ } else if (option == "-XX:UseTLAB") {
+ use_tlab_ = true;
+ } else if (StartsWith(option, "-D")) {
+ properties_.push_back(option.substr(strlen("-D")));
+ } else if (StartsWith(option, "-Xjnitrace:")) {
+ jni_trace_ = option.substr(strlen("-Xjnitrace:"));
+ } else if (option == "compilercallbacks") {
+ compiler_callbacks_ =
+ reinterpret_cast<CompilerCallbacks*>(const_cast<void*>(options[i].second));
+ } else if (option == "-Xzygote") {
+ is_zygote_ = true;
+ } else if (option == "-Xint") {
+ interpreter_only_ = true;
+ } else if (StartsWith(option, "-Xgc:")) {
+ std::vector<std::string> gc_options;
+ Split(option.substr(strlen("-Xgc:")), ',', gc_options);
+ for (const std::string& gc_option : gc_options) {
+ gc::CollectorType collector_type = ParseCollectorType(gc_option);
+ if (collector_type != gc::kCollectorTypeNone) {
+ collector_type_ = collector_type;
+ } else if (gc_option == "preverify") {
+ verify_pre_gc_heap_ = true;
+ } else if (gc_option == "nopreverify") {
+ verify_pre_gc_heap_ = false;
+ } else if (gc_option == "postverify") {
+ verify_post_gc_heap_ = true;
+ } else if (gc_option == "nopostverify") {
+ verify_post_gc_heap_ = false;
+ } else if (gc_option == "preverify_rosalloc") {
+ verify_pre_gc_rosalloc_ = true;
+ } else if (gc_option == "nopreverify_rosalloc") {
+ verify_pre_gc_rosalloc_ = false;
+ } else if (gc_option == "postverify_rosalloc") {
+ verify_post_gc_rosalloc_ = true;
+ } else if (gc_option == "nopostverify_rosalloc") {
+ verify_post_gc_rosalloc_ = false;
+ } else if ((gc_option == "precise") ||
+ (gc_option == "noprecise") ||
+ (gc_option == "verifycardtable") ||
+ (gc_option == "noverifycardtable")) {
+ // Ignored for backwards compatibility.
+ } else {
+ Usage("Unknown -Xgc option %s", gc_option.c_str());
+ return false;
+ }
+ }
+ } else if (StartsWith(option, "-XX:BackgroundGC=")) {
+ std::string substring;
+ if (!ParseStringAfterChar(option, '=', &substring)) {
+ return false;
+ }
+ gc::CollectorType collector_type = ParseCollectorType(substring);
+ if (collector_type != gc::kCollectorTypeNone) {
+ background_collector_type_ = collector_type;
+ } else {
+ Usage("Unknown -XX:BackgroundGC option %s", substring.c_str());
+ return false;
+ }
+ } else if (option == "-XX:+DisableExplicitGC") {
+ is_explicit_gc_disabled_ = true;
+ } else if (StartsWith(option, "-verbose:")) {
+ std::vector<std::string> verbose_options;
+ Split(option.substr(strlen("-verbose:")), ',', verbose_options);
+ for (size_t i = 0; i < verbose_options.size(); ++i) {
+ if (verbose_options[i] == "class") {
+ gLogVerbosity.class_linker = true;
+ } else if (verbose_options[i] == "verifier") {
+ gLogVerbosity.verifier = true;
+ } else if (verbose_options[i] == "compiler") {
+ gLogVerbosity.compiler = true;
+ } else if (verbose_options[i] == "heap") {
+ gLogVerbosity.heap = true;
+ } else if (verbose_options[i] == "gc") {
+ gLogVerbosity.gc = true;
+ } else if (verbose_options[i] == "jdwp") {
+ gLogVerbosity.jdwp = true;
+ } else if (verbose_options[i] == "jni") {
+ gLogVerbosity.jni = true;
+ } else if (verbose_options[i] == "monitor") {
+ gLogVerbosity.monitor = true;
+ } else if (verbose_options[i] == "startup") {
+ gLogVerbosity.startup = true;
+ } else if (verbose_options[i] == "third-party-jni") {
+ gLogVerbosity.third_party_jni = true;
+ } else if (verbose_options[i] == "threads") {
+ gLogVerbosity.threads = true;
+ } else {
+ Usage("Unknown -verbose option %s", verbose_options[i].c_str());
+ return false;
+ }
+ }
+ } else if (StartsWith(option, "-Xlockprofthreshold:")) {
+ if (!ParseUnsignedInteger(option, ':', &lock_profiling_threshold_)) {
+ return false;
+ }
+ } else if (StartsWith(option, "-Xstacktracefile:")) {
+ if (!ParseStringAfterChar(option, ':', &stack_trace_file_)) {
+ return false;
+ }
+ } else if (option == "sensitiveThread") {
+ const void* hook = options[i].second;
+ hook_is_sensitive_thread_ = reinterpret_cast<bool (*)()>(const_cast<void*>(hook));
+ } else if (option == "vfprintf") {
+ const void* hook = options[i].second;
+ if (hook == nullptr) {
+ Usage("vfprintf argument was NULL");
+ return false;
+ }
+ hook_vfprintf_ =
+ reinterpret_cast<int (*)(FILE *, const char*, va_list)>(const_cast<void*>(hook));
+ } else if (option == "exit") {
+ const void* hook = options[i].second;
+ if (hook == nullptr) {
+ Usage("exit argument was NULL");
+ return false;
+ }
+ hook_exit_ = reinterpret_cast<void(*)(jint)>(const_cast<void*>(hook));
+ } else if (option == "abort") {
+ const void* hook = options[i].second;
+ if (hook == nullptr) {
+ Usage("abort was NULL");
+ return false;
+ }
+ hook_abort_ = reinterpret_cast<void(*)()>(const_cast<void*>(hook));
+ } else if (option == "host-prefix") {
+ host_prefix_ = reinterpret_cast<const char*>(options[i].second);
+ } else if (option == "-Xmethod-trace") {
+ method_trace_ = true;
+ } else if (StartsWith(option, "-Xmethod-trace-file:")) {
+ method_trace_file_ = option.substr(strlen("-Xmethod-trace-file:"));
+ } else if (StartsWith(option, "-Xmethod-trace-file-size:")) {
+ if (!ParseUnsignedInteger(option, ':', &method_trace_file_size_)) {
+ return false;
+ }
+ } else if (option == "-Xprofile:threadcpuclock") {
+ Trace::SetDefaultClockSource(kProfilerClockSourceThreadCpu);
+ } else if (option == "-Xprofile:wallclock") {
+ Trace::SetDefaultClockSource(kProfilerClockSourceWall);
+ } else if (option == "-Xprofile:dualclock") {
+ Trace::SetDefaultClockSource(kProfilerClockSourceDual);
+ } else if (StartsWith(option, "-Xprofile:")) {
+ if (!ParseStringAfterChar(option, ':', &profile_output_filename_)) {
+ return false;
+ }
+ profile_ = true;
+ } else if (StartsWith(option, "-Xprofile-period:")) {
+ if (!ParseUnsignedInteger(option, ':', &profile_period_s_)) {
+ return false;
+ }
+ } else if (StartsWith(option, "-Xprofile-duration:")) {
+ if (!ParseUnsignedInteger(option, ':', &profile_duration_s_)) {
+ return false;
+ }
+ } else if (StartsWith(option, "-Xprofile-interval:")) {
+ if (!ParseUnsignedInteger(option, ':', &profile_interval_us_)) {
+ return false;
+ }
+ } else if (StartsWith(option, "-Xprofile-backoff:")) {
+ if (!ParseDouble(option, ':', 1.0, 10.0, &profile_backoff_coefficient_)) {
+ return false;
+ }
+ } else if (option == "-Xcompiler-option") {
+ i++;
+ if (i == options.size()) {
+ Usage("Missing required compiler option for %s", option.c_str());
+ return false;
+ }
+ compiler_options_.push_back(options[i].first);
+ } else if (option == "-Ximage-compiler-option") {
+ i++;
+ if (i == options.size()) {
+ Usage("Missing required compiler option for %s", option.c_str());
+ return false;
+ }
+ image_compiler_options_.push_back(options[i].first);
+ } else if (StartsWith(option, "-ea:") ||
+ StartsWith(option, "-da:") ||
+ StartsWith(option, "-enableassertions:") ||
+ StartsWith(option, "-disableassertions:") ||
+ (option == "-esa") ||
+ (option == "-dsa") ||
+ (option == "-enablesystemassertions") ||
+ (option == "-disablesystemassertions") ||
+ StartsWith(option, "-Xverify:") ||
+ (option == "-Xrs") ||
+ StartsWith(option, "-Xint:") ||
+ StartsWith(option, "-Xdexopt:") ||
+ (option == "-Xnoquithandler") ||
+ StartsWith(option, "-Xjniopts:") ||
+ StartsWith(option, "-Xjnigreflimit:") ||
+ (option == "-Xgenregmap") ||
+ (option == "-Xnogenregmap") ||
+ StartsWith(option, "-Xverifyopt:") ||
+ (option == "-Xcheckdexsum") ||
+ (option == "-Xincludeselectedop") ||
+ StartsWith(option, "-Xjitop:") ||
+ (option == "-Xincludeselectedmethod") ||
+ StartsWith(option, "-Xjitthreshold:") ||
+ StartsWith(option, "-Xjitcodecachesize:") ||
+ (option == "-Xjitblocking") ||
+ StartsWith(option, "-Xjitmethod:") ||
+ StartsWith(option, "-Xjitclass:") ||
+ StartsWith(option, "-Xjitoffset:") ||
+ StartsWith(option, "-Xjitconfig:") ||
+ (option == "-Xjitcheckcg") ||
+ (option == "-Xjitverbose") ||
+ (option == "-Xjitprofile") ||
+ (option == "-Xjitdisableopt") ||
+ (option == "-Xjitsuspendpoll") ||
+ StartsWith(option, "-XX:mainThreadStackSize=")) {
+ // Ignored for backwards compatibility.
+ } else if (!ignore_unrecognized) {
+ Usage("Unrecognized option %s", option.c_str());
+ return false;
+ }
+ }
+
+ // If a reference to the dalvik core.jar snuck in, replace it with
+ // the art-specific version. This can happen with on-device
+ // boot.art/boot.oat generation by GenerateImage, which relies on the
+ // value of BOOTCLASSPATH.
+ std::string core_jar("/core.jar");
+ size_t core_jar_pos = boot_class_path_string_.find(core_jar);
+ if (core_jar_pos != std::string::npos) {
+ boot_class_path_string_.replace(core_jar_pos, core_jar.size(), "/core-libart.jar");
+ }
+
+ if (compiler_callbacks_ == nullptr && image_.empty()) {
+ image_ += GetAndroidRoot();
+ image_ += "/framework/boot.art";
+ }
+ if (heap_growth_limit_ == 0) {
+ heap_growth_limit_ = heap_maximum_size_;
+ }
+ if (background_collector_type_ == gc::kCollectorTypeNone) {
+ background_collector_type_ = collector_type_;
+ }
+ return true;
+}
+
+void ParsedOptions::Exit(int status) {
+ hook_exit_(status);
+}
+
+void ParsedOptions::Abort() {
+ hook_abort_();
+}
+
+void ParsedOptions::UsageMessageV(FILE* stream, const char* fmt, va_list ap) {
+ hook_vfprintf_(stream, fmt, ap);
+}
+
+void ParsedOptions::UsageMessage(FILE* stream, const char* fmt, ...) {
+ va_list ap;
+ va_start(ap, fmt);
+ UsageMessageV(stream, fmt, ap);
+ va_end(ap);
+}
+
+void ParsedOptions::Usage(const char* fmt, ...) {
+ bool error = (fmt != nullptr);
+ FILE* stream = error ? stderr : stdout;
+
+ if (fmt != nullptr) {
+ va_list ap;
+ va_start(ap, fmt);
+ UsageMessageV(stream, fmt, ap);
+ va_end(ap);
+ }
+
+ const char* program = "dalvikvm";
+ UsageMessage(stream, "%s: [options] class [argument ...]\n", program);
+ UsageMessage(stream, "\n");
+ UsageMessage(stream, "The following standard options are supported:\n");
+ UsageMessage(stream, " -classpath classpath (-cp classpath)\n");
+ UsageMessage(stream, " -Dproperty=value\n");
+ UsageMessage(stream, " -verbose:tag ('gc', 'jni', or 'class')\n");
+ UsageMessage(stream, " -showversion\n");
+ UsageMessage(stream, " -help\n");
+ UsageMessage(stream, " -agentlib:jdwp=options\n");
+ UsageMessage(stream, "\n");
+
+ UsageMessage(stream, "The following extended options are supported:\n");
+ UsageMessage(stream, " -Xrunjdwp:<options>\n");
+ UsageMessage(stream, " -Xbootclasspath:bootclasspath\n");
+ UsageMessage(stream, " -Xcheck:tag (e.g. 'jni')\n");
+ UsageMessage(stream, " -XmsN (min heap, must be multiple of 1K, >= 1MB)\n");
+ UsageMessage(stream, " -XmxN (max heap, must be multiple of 1K, >= 2MB)\n");
+ UsageMessage(stream, " -XssN (stack size)\n");
+ UsageMessage(stream, " -Xint\n");
+ UsageMessage(stream, "\n");
+
+ UsageMessage(stream, "The following Dalvik options are supported:\n");
+ UsageMessage(stream, " -Xzygote\n");
+ UsageMessage(stream, " -Xjnitrace:substring (eg NativeClass or nativeMethod)\n");
+ UsageMessage(stream, " -Xstacktracefile:<filename>\n");
+ UsageMessage(stream, " -Xgc:[no]preverify\n");
+ UsageMessage(stream, " -Xgc:[no]postverify\n");
+ UsageMessage(stream, " -XX:+DisableExplicitGC\n");
+ UsageMessage(stream, " -XX:HeapGrowthLimit=N\n");
+ UsageMessage(stream, " -XX:HeapMinFree=N\n");
+ UsageMessage(stream, " -XX:HeapMaxFree=N\n");
+ UsageMessage(stream, " -XX:HeapTargetUtilization=doublevalue\n");
+ UsageMessage(stream, " -XX:LowMemoryMode\n");
+ UsageMessage(stream, " -Xprofile:{threadcpuclock,wallclock,dualclock}\n");
+ UsageMessage(stream, "\n");
+
+ UsageMessage(stream, "The following unique to ART options are supported:\n");
+ UsageMessage(stream, " -Xgc:[no]preverify_rosalloc\n");
+ UsageMessage(stream, " -Xgc:[no]postverify_rosalloc\n");
+ UsageMessage(stream, " -Ximage:filename\n");
+ UsageMessage(stream, " -XX:ParallelGCThreads=integervalue\n");
+ UsageMessage(stream, " -XX:ConcGCThreads=integervalue\n");
+ UsageMessage(stream, " -XX:MaxSpinsBeforeThinLockInflation=integervalue\n");
+ UsageMessage(stream, " -XX:LongPauseLogThreshold=integervalue\n");
+ UsageMessage(stream, " -XX:LongGCLogThreshold=integervalue\n");
+ UsageMessage(stream, " -XX:DumpGCPerformanceOnShutdown\n");
+ UsageMessage(stream, " -XX:IgnoreMaxFootprint\n");
+ UsageMessage(stream, " -XX:UseTLAB\n");
+ UsageMessage(stream, " -XX:BackgroundGC=none\n");
+ UsageMessage(stream, " -Xmethod-trace\n");
+ UsageMessage(stream, " -Xmethod-trace-file:filename");
+ UsageMessage(stream, " -Xmethod-trace-file-size:integervalue\n");
+ UsageMessage(stream, " -Xprofile=filename\n");
+ UsageMessage(stream, " -Xprofile-period:integervalue\n");
+ UsageMessage(stream, " -Xprofile-duration:integervalue\n");
+ UsageMessage(stream, " -Xprofile-interval:integervalue\n");
+ UsageMessage(stream, " -Xprofile-backoff:integervalue\n");
+ UsageMessage(stream, " -Xcompiler-option dex2oat-option\n");
+ UsageMessage(stream, " -Ximage-compiler-option dex2oat-option\n");
+ UsageMessage(stream, "\n");
+
+ UsageMessage(stream, "The following previously supported Dalvik options are ignored:\n");
+ UsageMessage(stream, " -ea[:<package name>... |:<class name>]\n");
+ UsageMessage(stream, " -da[:<package name>... |:<class name>]\n");
+ UsageMessage(stream, " (-enableassertions, -disableassertions)\n");
+ UsageMessage(stream, " -esa\n");
+ UsageMessage(stream, " -dsa\n");
+ UsageMessage(stream, " (-enablesystemassertions, -disablesystemassertions)\n");
+ UsageMessage(stream, " -Xverify:{none,remote,all}\n");
+ UsageMessage(stream, " -Xrs\n");
+ UsageMessage(stream, " -Xint:portable, -Xint:fast, -Xint:jit\n");
+ UsageMessage(stream, " -Xdexopt:{none,verified,all,full}\n");
+ UsageMessage(stream, " -Xnoquithandler\n");
+ UsageMessage(stream, " -Xjniopts:{warnonly,forcecopy}\n");
+ UsageMessage(stream, " -Xjnigreflimit:integervalue\n");
+ UsageMessage(stream, " -Xgc:[no]precise\n");
+ UsageMessage(stream, " -Xgc:[no]verifycardtable\n");
+ UsageMessage(stream, " -X[no]genregmap\n");
+ UsageMessage(stream, " -Xverifyopt:[no]checkmon\n");
+ UsageMessage(stream, " -Xcheckdexsum\n");
+ UsageMessage(stream, " -Xincludeselectedop\n");
+ UsageMessage(stream, " -Xjitop:hexopvalue[-endvalue][,hexopvalue[-endvalue]]*\n");
+ UsageMessage(stream, " -Xincludeselectedmethod\n");
+ UsageMessage(stream, " -Xjitthreshold:integervalue\n");
+ UsageMessage(stream, " -Xjitcodecachesize:decimalvalueofkbytes\n");
+ UsageMessage(stream, " -Xjitblocking\n");
+ UsageMessage(stream, " -Xjitmethod:signature[,signature]* (eg Ljava/lang/String\\;replace)\n");
+ UsageMessage(stream, " -Xjitclass:classname[,classname]*\n");
+ UsageMessage(stream, " -Xjitoffset:offset[,offset]\n");
+ UsageMessage(stream, " -Xjitconfig:filename\n");
+ UsageMessage(stream, " -Xjitcheckcg\n");
+ UsageMessage(stream, " -Xjitverbose\n");
+ UsageMessage(stream, " -Xjitprofile\n");
+ UsageMessage(stream, " -Xjitdisableopt\n");
+ UsageMessage(stream, " -Xjitsuspendpoll\n");
+ UsageMessage(stream, " -XX:mainThreadStackSize=N\n");
+ UsageMessage(stream, "\n");
+
+ Exit((error) ? 1 : 0);
+}
+
+bool ParsedOptions::ParseStringAfterChar(const std::string& s, char c, std::string* parsed_value) {
+ std::string::size_type colon = s.find(c);
+ if (colon == std::string::npos) {
+ Usage("Missing char %c in option %s", c, s.c_str());
+ return false;
+ }
+ // Add one to remove the char we were trimming until.
+ *parsed_value = s.substr(colon + 1);
+ return true;
+}
+
+bool ParsedOptions::ParseInteger(const std::string& s, char after_char, int* parsed_value) {
+ std::string::size_type colon = s.find(after_char);
+ if (colon == std::string::npos) {
+ Usage("Missing char %c in option %s", after_char, s.c_str());
+ return false;
+ }
+ const char* begin = &s[colon + 1];
+ char* end;
+ size_t result = strtoul(begin, &end, 10);
+ if (begin == end || *end != '\0') {
+ Usage("Failed to parse integer from %s ", s.c_str());
+ return false;
+ }
+ *parsed_value = result;
+ return true;
+}
+
+bool ParsedOptions::ParseUnsignedInteger(const std::string& s, char after_char,
+ unsigned int* parsed_value) {
+ int i;
+ if (!ParseInteger(s, after_char, &i)) {
+ return false;
+ }
+ if (i < 0) {
+ Usage("Negative value %d passed for unsigned option %s", i, s.c_str());
+ return false;
+ }
+ *parsed_value = i;
+ return true;
+}
+
+bool ParsedOptions::ParseDouble(const std::string& option, char after_char,
+ double min, double max, double* parsed_value) {
+ std::string substring;
+ if (!ParseStringAfterChar(option, after_char, &substring)) {
+ return false;
+ }
+ std::istringstream iss(substring);
+ double value;
+ iss >> value;
+ // Ensure that we have a value, that there was no cruft after it, and that it is within a sensible range.
+ const bool sane_val = iss.eof() && (value >= min) && (value <= max);
+ if (!sane_val) {
+ Usage("Invalid double value %s for option %s", option.c_str());
+ return false;
+ }
+ *parsed_value = value;
+ return true;
+}
+
+} // namespace art
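
ParseDouble above leans on an istringstream detail: extraction sets eofbit only when it consumed the entire string, so iss.eof() doubles as the trailing-cruft check. A standalone sketch of that validation (names hypothetical):

#include <cassert>
#include <sstream>
#include <string>

bool ParseDoubleSketch(const std::string& s, double min, double max, double* parsed_value) {
  std::istringstream iss(s);
  double value;
  iss >> value;
  // eof() holds only when extraction consumed the whole string.
  if (!iss.eof() || value < min || value > max) return false;
  *parsed_value = value;
  return true;
}

int main() {
  double v;
  assert(ParseDoubleSketch("0.75", 0.1, 0.9, &v) && v == 0.75);  // In range, fully consumed.
  assert(!ParseDoubleSketch("0.75abc", 0.1, 0.9, &v));           // Trailing cruft rejected.
  assert(!ParseDoubleSketch("0.95", 0.1, 0.9, &v));              // Above max.
  return 0;
}
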
diff --git a/runtime/parsed_options.h b/runtime/parsed_options.h
new file mode 100644
index 0000000000..b94956e84a
--- /dev/null
+++ b/runtime/parsed_options.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_PARSED_OPTIONS_H_
+#define ART_RUNTIME_PARSED_OPTIONS_H_
+
+#include <string>
+
+#include "runtime.h"
+#include "trace.h"
+
+namespace art {
+
+class ParsedOptions {
+ public:
+ // Returns null if parsing fails and ignore_unrecognized is false.
+ static ParsedOptions* Create(const Runtime::Options& options, bool ignore_unrecognized);
+
+ const std::vector<const DexFile*>* boot_class_path_;
+ std::string boot_class_path_string_;
+ std::string class_path_string_;
+ std::string host_prefix_;
+ std::string image_;
+ bool check_jni_;
+ std::string jni_trace_;
+ CompilerCallbacks* compiler_callbacks_;
+ bool is_zygote_;
+ bool interpreter_only_;
+ bool is_explicit_gc_disabled_;
+ bool use_tlab_;
+ bool verify_pre_gc_heap_;
+ bool verify_post_gc_heap_;
+ bool verify_pre_gc_rosalloc_;
+ bool verify_post_gc_rosalloc_;
+ size_t long_pause_log_threshold_;
+ size_t long_gc_log_threshold_;
+ bool dump_gc_performance_on_shutdown_;
+ bool ignore_max_footprint_;
+ size_t heap_initial_size_;
+ size_t heap_maximum_size_;
+ size_t heap_growth_limit_;
+ size_t heap_min_free_;
+ size_t heap_max_free_;
+ double heap_target_utilization_;
+ size_t parallel_gc_threads_;
+ size_t conc_gc_threads_;
+ gc::CollectorType collector_type_;
+ gc::CollectorType background_collector_type_;
+ size_t stack_size_;
+ size_t max_spins_before_thin_lock_inflation_;
+ bool low_memory_mode_;
+ size_t lock_profiling_threshold_;
+ std::string stack_trace_file_;
+ bool method_trace_;
+ std::string method_trace_file_;
+ size_t method_trace_file_size_;
+ bool (*hook_is_sensitive_thread_)();
+ jint (*hook_vfprintf_)(FILE* stream, const char* format, va_list ap);
+ void (*hook_exit_)(jint status);
+ void (*hook_abort_)();
+ std::vector<std::string> properties_;
+ std::vector<std::string> compiler_options_;
+ std::vector<std::string> image_compiler_options_;
+ bool profile_;
+ std::string profile_output_filename_;
+ uint32_t profile_period_s_;
+ uint32_t profile_duration_s_;
+ uint32_t profile_interval_us_;
+ double profile_backoff_coefficient_;
+ ProfilerClockSource profile_clock_source_;
+
+ private:
+ ParsedOptions() {}
+
+ void Usage(const char* fmt, ...);
+ void UsageMessage(FILE* stream, const char* fmt, ...);
+ void UsageMessageV(FILE* stream, const char* fmt, va_list ap);
+
+ void Exit(int status);
+ void Abort();
+
+ bool Parse(const Runtime::Options& options, bool ignore_unrecognized);
+ bool ParseStringAfterChar(const std::string& option, char after_char, std::string* parsed_value);
+ bool ParseInteger(const std::string& option, char after_char, int* parsed_value);
+ bool ParseUnsignedInteger(const std::string& option, char after_char, unsigned int* parsed_value);
+ bool ParseDouble(const std::string& option, char after_char, double min, double max,
+ double* parsed_value);
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_PARSED_OPTIONS_H_
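
Typical construction mirrors the renamed test below: callers build a Runtime::Options vector of (string, pointer) pairs and hand it to ParsedOptions::Create, checking for null on failure. A minimal usage sketch (option values hypothetical, assuming the headers above):

#include <utility>

#include "UniquePtr.h"
#include "parsed_options.h"

bool SketchParse() {
  art::Runtime::Options options;
  options.push_back(std::make_pair("-Xmx64m", nullptr));
  options.push_back(std::make_pair("-Xgc:CMS", nullptr));
  UniquePtr<art::ParsedOptions> parsed(art::ParsedOptions::Create(options, false));
  if (parsed.get() == nullptr) {
    return false;  // Create() returns null when parsing fails.
  }
  // On success the fields are populated, e.g. parsed->heap_maximum_size_ == 64 * MB
  // and parsed->collector_type_ == art::gc::kCollectorTypeCMS.
  return true;
}
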
diff --git a/runtime/runtime_test.cc b/runtime/parsed_options_test.cc
index 5b881e5011..58353b13a2 100644
--- a/runtime/runtime_test.cc
+++ b/runtime/parsed_options_test.cc
@@ -14,16 +14,16 @@
* limitations under the License.
*/
-#include "runtime.h"
+#include "parsed_options.h"
#include "UniquePtr.h"
#include "common_runtime_test.h"
namespace art {
-class RuntimeTest : public CommonRuntimeTest {};
+class ParsedOptionsTest : public CommonRuntimeTest {};
-TEST_F(RuntimeTest, ParsedOptions) {
+TEST_F(ParsedOptionsTest, ParsedOptions) {
void* test_vfprintf = reinterpret_cast<void*>(0xa);
void* test_abort = reinterpret_cast<void*>(0xb);
void* test_exit = reinterpret_cast<void*>(0xc);
@@ -54,7 +54,7 @@ TEST_F(RuntimeTest, ParsedOptions) {
options.push_back(std::make_pair("vfprintf", test_vfprintf));
options.push_back(std::make_pair("abort", test_abort));
options.push_back(std::make_pair("exit", test_exit));
- UniquePtr<Runtime::ParsedOptions> parsed(Runtime::ParsedOptions::Create(options, false));
+ UniquePtr<ParsedOptions> parsed(ParsedOptions::Create(options, false));
ASSERT_TRUE(parsed.get() != NULL);
EXPECT_EQ(lib_core, parsed->boot_class_path_string_);
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 1ef15f7416..ae9c9836c2 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -51,6 +51,7 @@
#include "mirror/stack_trace_element.h"
#include "mirror/throwable.h"
#include "monitor.h"
+#include "parsed_options.h"
#include "oat_file.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
@@ -91,6 +92,8 @@ Runtime::Runtime()
resolution_method_(nullptr),
imt_conflict_method_(nullptr),
default_imt_(nullptr),
+ fault_message_lock_("Fault message lock"),
+ fault_message_(""),
method_verifiers_lock_("Method verifiers lock"),
threads_being_born_(0),
shutdown_cond_(new ConditionVariable("Runtime shutdown", *Locks::runtime_shutdown_lock_)),
@@ -263,113 +266,6 @@ void Runtime::CallExitHook(jint status) {
}
}
-// Parse a string of the form /[0-9]+[kKmMgG]?/, which is used to specify
-// memory sizes. [kK] indicates kilobytes, [mM] megabytes, and
-// [gG] gigabytes.
-//
-// "s" should point just past the "-Xm?" part of the string.
-// "div" specifies a divisor, e.g. 1024 if the value must be a multiple
-// of 1024.
-//
-// The spec says the -Xmx and -Xms options must be multiples of 1024. It
-// doesn't say anything about -Xss.
-//
-// Returns 0 (a useless size) if "s" is malformed or specifies a low or
-// non-evenly-divisible value.
-//
-size_t ParseMemoryOption(const char* s, size_t div) {
- // strtoul accepts a leading [+-], which we don't want,
- // so make sure our string starts with a decimal digit.
- if (isdigit(*s)) {
- char* s2;
- size_t val = strtoul(s, &s2, 10);
- if (s2 != s) {
- // s2 should be pointing just after the number.
- // If this is the end of the string, the user
- // has specified a number of bytes. Otherwise,
- // there should be exactly one more character
- // that specifies a multiplier.
- if (*s2 != '\0') {
- // The remainder of the string is either a single multiplier
- // character, or nothing to indicate that the value is in
- // bytes.
- char c = *s2++;
- if (*s2 == '\0') {
- size_t mul;
- if (c == '\0') {
- mul = 1;
- } else if (c == 'k' || c == 'K') {
- mul = KB;
- } else if (c == 'm' || c == 'M') {
- mul = MB;
- } else if (c == 'g' || c == 'G') {
- mul = GB;
- } else {
- // Unknown multiplier character.
- return 0;
- }
-
- if (val <= std::numeric_limits<size_t>::max() / mul) {
- val *= mul;
- } else {
- // Clamp to a multiple of 1024.
- val = std::numeric_limits<size_t>::max() & ~(1024-1);
- }
- } else {
- // There's more than one character after the numeric part.
- return 0;
- }
- }
- // The man page says that a -Xm value must be a multiple of 1024.
- if (val % div == 0) {
- return val;
- }
- }
- }
- return 0;
-}
-
-static const std::string StringAfterChar(const std::string& s, char c) {
- std::string::size_type colon = s.find(c);
- if (colon == std::string::npos) {
- LOG(FATAL) << "Missing char " << c << " in string " << s;
- }
- // Add one to remove the char we were trimming until.
- return s.substr(colon + 1);
-}
-
-static size_t ParseIntegerOrDie(const std::string& s, char after_char) {
- std::string::size_type colon = s.find(after_char);
- if (colon == std::string::npos) {
- LOG(FATAL) << "Missing char " << after_char << " in string " << s;
- }
- const char* begin = &s[colon + 1];
- char* end;
- size_t result = strtoul(begin, &end, 10);
- if (begin == end || *end != '\0') {
- LOG(FATAL) << "Failed to parse integer in: " << s;
- }
- return result;
-}
-
-
-static double ParseDoubleOrDie(const std::string& option, char after_char, double min, double max,
- bool ignore_unrecognized, double defval) {
- std::istringstream iss(StringAfterChar(option, after_char));
- double value;
- iss >> value;
- // Ensure that we have a value, there was no cruft after it and it satisfies a sensible range.
- const bool sane_val = iss.eof() && (value >= min) && (value <= max);
- if (!sane_val) {
- if (ignore_unrecognized) {
- return defval;
- }
- LOG(FATAL)<< "Invalid option '" << option << "'";
- return defval;
- }
- return value;
-}
-
void Runtime::SweepSystemWeaks(IsMarkedCallback* visitor, void* arg) {
GetInternTable()->SweepInternTableWeaks(visitor, arg);
GetMonitorList()->SweepMonitorList(visitor, arg);
@@ -377,384 +273,6 @@ void Runtime::SweepSystemWeaks(IsMarkedCallback* visitor, void* arg) {
Dbg::UpdateObjectPointers(visitor, arg);
}
-static gc::CollectorType ParseCollectorType(const std::string& option) {
- if (option == "MS" || option == "nonconcurrent") {
- return gc::kCollectorTypeMS;
- } else if (option == "CMS" || option == "concurrent") {
- return gc::kCollectorTypeCMS;
- } else if (option == "SS") {
- return gc::kCollectorTypeSS;
- } else if (option == "GSS") {
- return gc::kCollectorTypeGSS;
- } else {
- return gc::kCollectorTypeNone;
- }
-}
-
-Runtime::ParsedOptions* Runtime::ParsedOptions::Create(const Options& options, bool ignore_unrecognized) {
- UniquePtr<ParsedOptions> parsed(new ParsedOptions());
- const char* boot_class_path_string = getenv("BOOTCLASSPATH");
- if (boot_class_path_string != NULL) {
- parsed->boot_class_path_string_ = boot_class_path_string;
- }
- const char* class_path_string = getenv("CLASSPATH");
- if (class_path_string != NULL) {
- parsed->class_path_string_ = class_path_string;
- }
- // -Xcheck:jni is off by default for regular builds but on by default in debug builds.
- parsed->check_jni_ = kIsDebugBuild;
-
- parsed->heap_initial_size_ = gc::Heap::kDefaultInitialSize;
- parsed->heap_maximum_size_ = gc::Heap::kDefaultMaximumSize;
- parsed->heap_min_free_ = gc::Heap::kDefaultMinFree;
- parsed->heap_max_free_ = gc::Heap::kDefaultMaxFree;
- parsed->heap_target_utilization_ = gc::Heap::kDefaultTargetUtilization;
- parsed->heap_growth_limit_ = 0; // 0 means no growth limit .
- // Default to number of processors minus one since the main GC thread also does work.
- parsed->parallel_gc_threads_ = sysconf(_SC_NPROCESSORS_CONF) - 1;
- // Only the main GC thread, no workers.
- parsed->conc_gc_threads_ = 0;
- // Default is CMS which is Sticky + Partial + Full CMS GC.
- parsed->collector_type_ = gc::kCollectorTypeCMS;
- // If background_collector_type_ is kCollectorTypeNone, it defaults to the collector_type_ after
- // parsing options.
- parsed->background_collector_type_ = gc::kCollectorTypeNone;
- parsed->stack_size_ = 0; // 0 means default.
- parsed->max_spins_before_thin_lock_inflation_ = Monitor::kDefaultMaxSpinsBeforeThinLockInflation;
- parsed->low_memory_mode_ = false;
- parsed->use_tlab_ = false;
- parsed->verify_pre_gc_heap_ = false;
- parsed->verify_post_gc_heap_ = kIsDebugBuild;
- parsed->verify_pre_gc_rosalloc_ = kIsDebugBuild;
- parsed->verify_post_gc_rosalloc_ = false;
-
- parsed->compiler_callbacks_ = nullptr;
- parsed->is_zygote_ = false;
- parsed->interpreter_only_ = false;
- parsed->is_explicit_gc_disabled_ = false;
-
- parsed->long_pause_log_threshold_ = gc::Heap::kDefaultLongPauseLogThreshold;
- parsed->long_gc_log_threshold_ = gc::Heap::kDefaultLongGCLogThreshold;
- parsed->dump_gc_performance_on_shutdown_ = false;
- parsed->ignore_max_footprint_ = false;
-
- parsed->lock_profiling_threshold_ = 0;
- parsed->hook_is_sensitive_thread_ = NULL;
-
- parsed->hook_vfprintf_ = vfprintf;
- parsed->hook_exit_ = exit;
- parsed->hook_abort_ = NULL; // We don't call abort(3) by default; see Runtime::Abort.
-
-// gLogVerbosity.class_linker = true; // TODO: don't check this in!
-// gLogVerbosity.compiler = true; // TODO: don't check this in!
-// gLogVerbosity.verifier = true; // TODO: don't check this in!
-// gLogVerbosity.heap = true; // TODO: don't check this in!
-// gLogVerbosity.gc = true; // TODO: don't check this in!
-// gLogVerbosity.jdwp = true; // TODO: don't check this in!
-// gLogVerbosity.jni = true; // TODO: don't check this in!
-// gLogVerbosity.monitor = true; // TODO: don't check this in!
-// gLogVerbosity.startup = true; // TODO: don't check this in!
-// gLogVerbosity.third_party_jni = true; // TODO: don't check this in!
-// gLogVerbosity.threads = true; // TODO: don't check this in!
-
- parsed->method_trace_ = false;
- parsed->method_trace_file_ = "/data/method-trace-file.bin";
- parsed->method_trace_file_size_ = 10 * MB;
-
- parsed->profile_ = false;
- parsed->profile_period_s_ = 10; // Seconds.
- parsed->profile_duration_s_ = 20; // Seconds.
- parsed->profile_interval_us_ = 500; // Microseconds.
- parsed->profile_backoff_coefficient_ = 2.0;
-
- for (size_t i = 0; i < options.size(); ++i) {
- const std::string option(options[i].first);
- if (true && options[0].first == "-Xzygote") {
- LOG(INFO) << "option[" << i << "]=" << option;
- }
- if (StartsWith(option, "-Xbootclasspath:")) {
- parsed->boot_class_path_string_ = option.substr(strlen("-Xbootclasspath:")).data();
- } else if (option == "-classpath" || option == "-cp") {
- // TODO: support -Djava.class.path
- i++;
- if (i == options.size()) {
- // TODO: usage
- LOG(FATAL) << "Missing required class path value for " << option;
- return NULL;
- }
- const StringPiece& value = options[i].first;
- parsed->class_path_string_ = value.data();
- } else if (option == "bootclasspath") {
- parsed->boot_class_path_
- = reinterpret_cast<const std::vector<const DexFile*>*>(options[i].second);
- } else if (StartsWith(option, "-Ximage:")) {
- parsed->image_ = StringAfterChar(option, ':');
- } else if (StartsWith(option, "-Xcheck:jni")) {
- parsed->check_jni_ = true;
- } else if (StartsWith(option, "-Xrunjdwp:") || StartsWith(option, "-agentlib:jdwp=")) {
- std::string tail(option.substr(option[1] == 'X' ? 10 : 15));
- if (tail == "help" || !Dbg::ParseJdwpOptions(tail)) {
- LOG(FATAL) << "Example: -Xrunjdwp:transport=dt_socket,address=8000,server=y\n"
- << "Example: -Xrunjdwp:transport=dt_socket,address=localhost:6500,server=n";
- return NULL;
- }
- } else if (StartsWith(option, "-Xms")) {
- size_t size = ParseMemoryOption(option.substr(strlen("-Xms")).c_str(), 1024);
- if (size == 0) {
- if (ignore_unrecognized) {
- continue;
- }
- // TODO: usage
- LOG(FATAL) << "Failed to parse " << option;
- return NULL;
- }
- parsed->heap_initial_size_ = size;
- } else if (StartsWith(option, "-Xmx")) {
- size_t size = ParseMemoryOption(option.substr(strlen("-Xmx")).c_str(), 1024);
- if (size == 0) {
- if (ignore_unrecognized) {
- continue;
- }
- // TODO: usage
- LOG(FATAL) << "Failed to parse " << option;
- return NULL;
- }
- parsed->heap_maximum_size_ = size;
- } else if (StartsWith(option, "-XX:HeapGrowthLimit=")) {
- size_t size = ParseMemoryOption(option.substr(strlen("-XX:HeapGrowthLimit=")).c_str(), 1024);
- if (size == 0) {
- if (ignore_unrecognized) {
- continue;
- }
- // TODO: usage
- LOG(FATAL) << "Failed to parse " << option;
- return NULL;
- }
- parsed->heap_growth_limit_ = size;
- } else if (StartsWith(option, "-XX:HeapMinFree=")) {
- size_t size = ParseMemoryOption(option.substr(strlen("-XX:HeapMinFree=")).c_str(), 1024);
- if (size == 0) {
- if (ignore_unrecognized) {
- continue;
- }
- // TODO: usage
- LOG(FATAL) << "Failed to parse " << option;
- return NULL;
- }
- parsed->heap_min_free_ = size;
- } else if (StartsWith(option, "-XX:HeapMaxFree=")) {
- size_t size = ParseMemoryOption(option.substr(strlen("-XX:HeapMaxFree=")).c_str(), 1024);
- if (size == 0) {
- if (ignore_unrecognized) {
- continue;
- }
- // TODO: usage
- LOG(FATAL) << "Failed to parse " << option;
- return NULL;
- }
- parsed->heap_max_free_ = size;
- } else if (StartsWith(option, "-XX:HeapTargetUtilization=")) {
- parsed->heap_target_utilization_ = ParseDoubleOrDie(
- option, '=', 0.1, 0.9, ignore_unrecognized, parsed->heap_target_utilization_);
- } else if (StartsWith(option, "-XX:ParallelGCThreads=")) {
- parsed->parallel_gc_threads_ = ParseIntegerOrDie(option, '=');
- } else if (StartsWith(option, "-XX:ConcGCThreads=")) {
- parsed->conc_gc_threads_ = ParseIntegerOrDie(option, '=');
- } else if (StartsWith(option, "-Xss")) {
- size_t size = ParseMemoryOption(option.substr(strlen("-Xss")).c_str(), 1);
- if (size == 0) {
- if (ignore_unrecognized) {
- continue;
- }
- // TODO: usage
- LOG(FATAL) << "Failed to parse " << option;
- return NULL;
- }
- parsed->stack_size_ = size;
- } else if (StartsWith(option, "-XX:MaxSpinsBeforeThinLockInflation=")) {
- parsed->max_spins_before_thin_lock_inflation_ = ParseIntegerOrDie(option, '=');
- } else if (StartsWith(option, "-XX:LongPauseLogThreshold=")) {
- parsed->long_pause_log_threshold_ = MsToNs(ParseIntegerOrDie(option, '='));
- } else if (StartsWith(option, "-XX:LongGCLogThreshold=")) {
- parsed->long_gc_log_threshold_ = MsToNs(ParseIntegerOrDie(option, '='));
- } else if (option == "-XX:DumpGCPerformanceOnShutdown") {
- parsed->dump_gc_performance_on_shutdown_ = true;
- } else if (option == "-XX:IgnoreMaxFootprint") {
- parsed->ignore_max_footprint_ = true;
- } else if (option == "-XX:LowMemoryMode") {
- parsed->low_memory_mode_ = true;
- } else if (option == "-XX:UseTLAB") {
- parsed->use_tlab_ = true;
- } else if (StartsWith(option, "-D")) {
- parsed->properties_.push_back(option.substr(strlen("-D")));
- } else if (StartsWith(option, "-Xjnitrace:")) {
- parsed->jni_trace_ = option.substr(strlen("-Xjnitrace:"));
- } else if (option == "compilercallbacks") {
- parsed->compiler_callbacks_ =
- reinterpret_cast<CompilerCallbacks*>(const_cast<void*>(options[i].second));
- } else if (option == "-Xzygote") {
- parsed->is_zygote_ = true;
- } else if (option == "-Xint") {
- parsed->interpreter_only_ = true;
- } else if (StartsWith(option, "-Xgc:")) {
- std::vector<std::string> gc_options;
- Split(option.substr(strlen("-Xgc:")), ',', gc_options);
- for (const std::string& gc_option : gc_options) {
- gc::CollectorType collector_type = ParseCollectorType(gc_option);
- if (collector_type != gc::kCollectorTypeNone) {
- parsed->collector_type_ = collector_type;
- } else if (gc_option == "preverify") {
- parsed->verify_pre_gc_heap_ = true;
- } else if (gc_option == "nopreverify") {
- parsed->verify_pre_gc_heap_ = false;
- } else if (gc_option == "postverify") {
- parsed->verify_post_gc_heap_ = true;
- } else if (gc_option == "nopostverify") {
- parsed->verify_post_gc_heap_ = false;
- } else if (gc_option == "preverify_rosalloc") {
- parsed->verify_pre_gc_rosalloc_ = true;
- } else if (gc_option == "nopreverify_rosalloc") {
- parsed->verify_pre_gc_rosalloc_ = false;
- } else if (gc_option == "postverify_rosalloc") {
- parsed->verify_post_gc_rosalloc_ = true;
- } else if (gc_option == "nopostverify_rosalloc") {
- parsed->verify_post_gc_rosalloc_ = false;
- } else {
- LOG(WARNING) << "Ignoring unknown -Xgc option: " << gc_option;
- }
- }
- } else if (StartsWith(option, "-XX:BackgroundGC=")) {
- const std::string substring = StringAfterChar(option, '=');
- gc::CollectorType collector_type = ParseCollectorType(substring);
- if (collector_type != gc::kCollectorTypeNone) {
- parsed->background_collector_type_ = collector_type;
- } else {
- LOG(WARNING) << "Ignoring unknown -XX:BackgroundGC option: " << substring;
- }
- } else if (option == "-XX:+DisableExplicitGC") {
- parsed->is_explicit_gc_disabled_ = true;
- } else if (StartsWith(option, "-verbose:")) {
- std::vector<std::string> verbose_options;
- Split(option.substr(strlen("-verbose:")), ',', verbose_options);
- for (size_t i = 0; i < verbose_options.size(); ++i) {
- if (verbose_options[i] == "class") {
- gLogVerbosity.class_linker = true;
- } else if (verbose_options[i] == "verifier") {
- gLogVerbosity.verifier = true;
- } else if (verbose_options[i] == "compiler") {
- gLogVerbosity.compiler = true;
- } else if (verbose_options[i] == "heap") {
- gLogVerbosity.heap = true;
- } else if (verbose_options[i] == "gc") {
- gLogVerbosity.gc = true;
- } else if (verbose_options[i] == "jdwp") {
- gLogVerbosity.jdwp = true;
- } else if (verbose_options[i] == "jni") {
- gLogVerbosity.jni = true;
- } else if (verbose_options[i] == "monitor") {
- gLogVerbosity.monitor = true;
- } else if (verbose_options[i] == "startup") {
- gLogVerbosity.startup = true;
- } else if (verbose_options[i] == "third-party-jni") {
- gLogVerbosity.third_party_jni = true;
- } else if (verbose_options[i] == "threads") {
- gLogVerbosity.threads = true;
- } else {
- LOG(WARNING) << "Ignoring unknown -verbose option: " << verbose_options[i];
- }
- }
- } else if (StartsWith(option, "-Xjnigreflimit:")) {
- // Silently ignored for backwards compatibility.
- } else if (StartsWith(option, "-Xlockprofthreshold:")) {
- parsed->lock_profiling_threshold_ = ParseIntegerOrDie(option, ':');
- } else if (StartsWith(option, "-Xstacktracefile:")) {
- parsed->stack_trace_file_ = StringAfterChar(option, ':');
- } else if (option == "sensitiveThread") {
- parsed->hook_is_sensitive_thread_ = reinterpret_cast<bool (*)()>(const_cast<void*>(options[i].second));
- } else if (option == "vfprintf") {
- parsed->hook_vfprintf_ =
- reinterpret_cast<int (*)(FILE *, const char*, va_list)>(const_cast<void*>(options[i].second));
- } else if (option == "exit") {
- parsed->hook_exit_ = reinterpret_cast<void(*)(jint)>(const_cast<void*>(options[i].second));
- } else if (option == "abort") {
- parsed->hook_abort_ = reinterpret_cast<void(*)()>(const_cast<void*>(options[i].second));
- } else if (option == "host-prefix") {
- parsed->host_prefix_ = reinterpret_cast<const char*>(options[i].second);
- } else if (option == "-Xgenregmap" || option == "-Xgc:precise") {
- // We silently ignore these for backwards compatibility.
- } else if (option == "-Xmethod-trace") {
- parsed->method_trace_ = true;
- } else if (StartsWith(option, "-Xmethod-trace-file:")) {
- parsed->method_trace_file_ = option.substr(strlen("-Xmethod-trace-file:"));
- } else if (StartsWith(option, "-Xmethod-trace-file-size:")) {
- parsed->method_trace_file_size_ = ParseIntegerOrDie(option, ':');
- } else if (option == "-Xprofile:threadcpuclock") {
- Trace::SetDefaultClockSource(kProfilerClockSourceThreadCpu);
- } else if (option == "-Xprofile:wallclock") {
- Trace::SetDefaultClockSource(kProfilerClockSourceWall);
- } else if (option == "-Xprofile:dualclock") {
- Trace::SetDefaultClockSource(kProfilerClockSourceDual);
- } else if (StartsWith(option, "-Xprofile:")) {
- parsed->profile_output_filename_ = StringAfterChar(option, ';');
- parsed->profile_ = true;
- } else if (StartsWith(option, "-Xprofile-period:")) {
- parsed->profile_period_s_ = ParseIntegerOrDie(option, ':');
- } else if (StartsWith(option, "-Xprofile-duration:")) {
- parsed->profile_duration_s_ = ParseIntegerOrDie(option, ':');
- } else if (StartsWith(option, "-Xprofile-interval:")) {
- parsed->profile_interval_us_ = ParseIntegerOrDie(option, ':');
- } else if (StartsWith(option, "-Xprofile-backoff:")) {
- parsed->profile_backoff_coefficient_ = ParseDoubleOrDie(
- option, ':', 1.0, 10.0, ignore_unrecognized, parsed->profile_backoff_coefficient_);
- } else if (option == "-Xcompiler-option") {
- i++;
- if (i == options.size()) {
- // TODO: usage
- LOG(FATAL) << "Missing required compiler option for " << option;
- return NULL;
- }
- parsed->compiler_options_.push_back(options[i].first);
- } else if (option == "-Ximage-compiler-option") {
- i++;
- if (i == options.size()) {
- // TODO: usage
- LOG(FATAL) << "Missing required compiler option for " << option;
- return NULL;
- }
- parsed->image_compiler_options_.push_back(options[i].first);
- } else {
- if (!ignore_unrecognized) {
- // TODO: print usage via vfprintf
- LOG(ERROR) << "Unrecognized option " << option;
- // TODO: this should exit, but for now tolerate unknown options
- // return NULL;
- }
- }
- }
-
- // If a reference to the dalvik core.jar snuck in, replace it with
- // the art specific version. This can happen with on device
- // boot.art/boot.oat generation by GenerateImage which relies on the
- // value of BOOTCLASSPATH.
- std::string core_jar("/core.jar");
- size_t core_jar_pos = parsed->boot_class_path_string_.find(core_jar);
- if (core_jar_pos != std::string::npos) {
- parsed->boot_class_path_string_.replace(core_jar_pos, core_jar.size(), "/core-libart.jar");
- }
-
- if (parsed->compiler_callbacks_ == nullptr && parsed->image_.empty()) {
- parsed->image_ += GetAndroidRoot();
- parsed->image_ += "/framework/boot.art";
- }
- if (parsed->heap_growth_limit_ == 0) {
- parsed->heap_growth_limit_ = parsed->heap_maximum_size_;
- }
- if (parsed->background_collector_type_ == gc::kCollectorTypeNone) {
- parsed->background_collector_type_ = parsed->collector_type_;
- }
- return parsed.release();
-}
-
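
The removed block above repeats one contract for every size-valued flag: ParseMemoryOption() returns 0 on any malformed value, so 0 doubles as the error sentinel that each branch checks. A minimal sketch of a helper with that behavior (a hypothetical standalone ParseMemorySize, not ART's actual implementation; it assumes sizes must be positive multiples of div, 1024 for the heap flags and 1 for -Xss):

    #include <cctype>
    #include <cstddef>
    #include <cstdlib>

    // Sketch: parse "512k", "64m", "1g", or a bare byte count; return 0 on
    // any failure so callers can treat 0 as "unparseable", as the code above
    // does. div is the required granularity (1024 for heap sizes, 1 for -Xss).
    size_t ParseMemorySize(const char* s, size_t div) {
      char* end = nullptr;
      unsigned long long value = strtoull(s, &end, 10);
      if (end == s) return 0;  // no leading digits
      switch (tolower(*end)) {
        case 'k': value <<= 10; ++end; break;
        case 'm': value <<= 20; ++end; break;
        case 'g': value <<= 30; ++end; break;
        default: break;
      }
      if (*end != '\0') return 0;                    // trailing junk
      if (value == 0 || value % div != 0) return 0;  // wrong granularity
      return static_cast<size_t>(value);
    }
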
bool Runtime::Create(const Options& options, bool ignore_unrecognized) {
// TODO: acquire a static mutex on Runtime to avoid racing.
if (Runtime::instance_ != NULL) {
@@ -799,8 +317,9 @@ jobject CreateSystemClassLoader() {
soa.Self()->SetClassLoaderOverride(class_loader.get());
- SirtRef<mirror::Class> thread_class(soa.Self(),
- soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread));
+ SirtRef<mirror::Class> thread_class(
+ soa.Self(),
+ soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread));
CHECK(cl->EnsureInitialized(thread_class, true, true));
mirror::ArtField* contextClassLoader =
@@ -1046,12 +565,15 @@ bool Runtime::Init(const Options& raw_options, bool ignore_unrecognized) {
method_trace_file_size_ = options->method_trace_file_size_;
// Extract the profile options.
+ // TODO: move into a Trace options struct?
profile_period_s_ = options->profile_period_s_;
profile_duration_s_ = options->profile_duration_s_;
profile_interval_us_ = options->profile_interval_us_;
profile_backoff_coefficient_ = options->profile_backoff_coefficient_;
profile_ = options->profile_;
profile_output_filename_ = options->profile_output_filename_;
+ // TODO: move this to just be a Trace::Start argument
+ Trace::SetDefaultClockSource(options->profile_clock_source_);
if (options->method_trace_) {
Trace::Start(options->method_trace_file_.c_str(), -1, options->method_trace_file_size_, 0,
@@ -1060,7 +582,8 @@ bool Runtime::Init(const Options& raw_options, bool ignore_unrecognized) {
// Pre-allocate an OutOfMemoryError for the double-OOME case.
self->ThrowNewException(ThrowLocation(), "Ljava/lang/OutOfMemoryError;",
- "OutOfMemoryError thrown while trying to throw OutOfMemoryError; no stack available");
+ "OutOfMemoryError thrown while trying to throw OutOfMemoryError; "
+ "no stack available");
pre_allocated_OutOfMemoryError_ = self->GetException(NULL);
self->ClearException();
@@ -1108,12 +631,14 @@ void Runtime::InitThreadGroups(Thread* self) {
JNIEnvExt* env = self->GetJniEnv();
ScopedJniEnvLocalRefState env_state(env);
main_thread_group_ =
- env->NewGlobalRef(env->GetStaticObjectField(WellKnownClasses::java_lang_ThreadGroup,
- WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup));
+ env->NewGlobalRef(env->GetStaticObjectField(
+ WellKnownClasses::java_lang_ThreadGroup,
+ WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup));
CHECK(main_thread_group_ != NULL || IsCompiler());
system_thread_group_ =
- env->NewGlobalRef(env->GetStaticObjectField(WellKnownClasses::java_lang_ThreadGroup,
- WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup));
+ env->NewGlobalRef(env->GetStaticObjectField(
+ WellKnownClasses::java_lang_ThreadGroup,
+ WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup));
CHECK(system_thread_group_ != NULL || IsCompiler());
}
@@ -1457,7 +982,7 @@ mirror::ArtMethod* Runtime::CreateCalleeSaveMethod(InstructionSet instruction_se
(1 << art::x86_64::RSI) | (1 << art::x86_64::RDX) | (1 << art::x86_64::RCX) |
(1 << art::x86_64::R8) | (1 << art::x86_64::R9);
uint32_t core_spills = ref_spills | (type == kRefsAndArgs ? arg_spills : 0) |
- (1 << art::x86_64::kNumberOfCpuRegisters); // fake return address callee save
+ (1 << art::x86_64::kNumberOfCpuRegisters); // fake return address callee save
uint32_t fp_arg_spills =
(1 << art::x86_64::XMM0) | (1 << art::x86_64::XMM1) | (1 << art::x86_64::XMM2) |
(1 << art::x86_64::XMM3) | (1 << art::x86_64::XMM4) | (1 << art::x86_64::XMM5) |
@@ -1504,7 +1029,8 @@ const std::vector<const DexFile*>& Runtime::GetCompileTimeClassPath(jobject clas
return it->second;
}
-void Runtime::SetCompileTimeClassPath(jobject class_loader, std::vector<const DexFile*>& class_path) {
+void Runtime::SetCompileTimeClassPath(jobject class_loader,
+ std::vector<const DexFile*>& class_path) {
CHECK(!IsStarted());
use_compile_time_class_path_ = true;
compile_time_class_paths_.Put(class_loader, class_path);
@@ -1525,8 +1051,9 @@ void Runtime::RemoveMethodVerifier(verifier::MethodVerifier* verifier) {
}
void Runtime::StartProfiler(const char *appDir, bool startImmediately) {
- BackgroundMethodSamplingProfiler::Start(profile_period_s_, profile_duration_s_, appDir, profile_interval_us_,
- profile_backoff_coefficient_, startImmediately);
+ BackgroundMethodSamplingProfiler::Start(profile_period_s_, profile_duration_s_, appDir,
+ profile_interval_us_, profile_backoff_coefficient_,
+ startImmediately);
}
// Transaction support.
@@ -1598,4 +1125,10 @@ void Runtime::RecordWeakStringRemoval(mirror::String* s, uint32_t hash_code) con
DCHECK(IsActiveTransaction());
preinitialization_transaction->RecordWeakStringRemoval(s, hash_code);
}
+
+void Runtime::SetFaultMessage(const std::string& message) {
+ MutexLock mu(Thread::Current(), fault_message_lock_);
+ fault_message_ = message;
+}
+
} // namespace art
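
The new SetFaultMessage()/fault_message_ pair gives crash reporting a breadcrumb: code about to do risky pointer work records what it is doing, and the SIGSEGV handlers added below print that string. A hedged sketch of the intended call pattern (the function and message here are illustrative, not from this patch):

    // Illustrative call site only. A pointer-heavy phase records its state so
    // that HandleUnexpectedSignal() can print it via GetFaultMessage() if the
    // phase crashes.
    void CompactSpaces() {
      Runtime* runtime = Runtime::Current();
      runtime->SetFaultMessage("space compaction in progress");
      // ... work that may SIGSEGV ...
      runtime->SetFaultMessage("");  // clear the breadcrumb on success
    }
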
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 892492124c..5ff334f6be 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -60,7 +60,7 @@ class ClassLinker;
class CompilerCallbacks;
class DexFile;
class InternTable;
-struct JavaVMExt;
+class JavaVMExt;
class MonitorList;
class MonitorPool;
class SignalCatcher;
@@ -72,67 +72,6 @@ class Runtime {
public:
typedef std::vector<std::pair<std::string, const void*> > Options;
- class ParsedOptions {
- public:
- // returns null if problem parsing and ignore_unrecognized is false
- static ParsedOptions* Create(const Options& options, bool ignore_unrecognized);
-
- const std::vector<const DexFile*>* boot_class_path_;
- std::string boot_class_path_string_;
- std::string class_path_string_;
- std::string host_prefix_;
- std::string image_;
- bool check_jni_;
- std::string jni_trace_;
- CompilerCallbacks* compiler_callbacks_;
- bool is_zygote_;
- bool interpreter_only_;
- bool is_explicit_gc_disabled_;
- bool use_tlab_;
- bool verify_pre_gc_heap_;
- bool verify_post_gc_heap_;
- bool verify_pre_gc_rosalloc_;
- bool verify_post_gc_rosalloc_;
- size_t long_pause_log_threshold_;
- size_t long_gc_log_threshold_;
- bool dump_gc_performance_on_shutdown_;
- bool ignore_max_footprint_;
- size_t heap_initial_size_;
- size_t heap_maximum_size_;
- size_t heap_growth_limit_;
- size_t heap_min_free_;
- size_t heap_max_free_;
- double heap_target_utilization_;
- size_t parallel_gc_threads_;
- size_t conc_gc_threads_;
- gc::CollectorType collector_type_;
- gc::CollectorType background_collector_type_;
- size_t stack_size_;
- size_t max_spins_before_thin_lock_inflation_;
- bool low_memory_mode_;
- size_t lock_profiling_threshold_;
- std::string stack_trace_file_;
- bool method_trace_;
- std::string method_trace_file_;
- size_t method_trace_file_size_;
- bool (*hook_is_sensitive_thread_)();
- jint (*hook_vfprintf_)(FILE* stream, const char* format, va_list ap);
- void (*hook_exit_)(jint status);
- void (*hook_abort_)();
- std::vector<std::string> properties_;
- std::vector<std::string> compiler_options_;
- std::vector<std::string> image_compiler_options_;
- bool profile_;
- std::string profile_output_filename_;
- int profile_period_s_;
- int profile_duration_s_;
- int profile_interval_us_;
- double profile_backoff_coefficient_;
-
- private:
- ParsedOptions() {}
- };
-
// Creates and initializes a new runtime.
static bool Create(const Options& options, bool ignore_unrecognized)
SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
@@ -279,7 +218,7 @@ class Runtime {
return thread_list_;
}
- const char* GetVersion() const {
+ static const char* GetVersion() {
return "2.0.0";
}
@@ -446,6 +385,13 @@ class Runtime {
void RecordWeakStringRemoval(mirror::String* s, uint32_t hash_code) const
EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+ void SetFaultMessage(const std::string& message);
+ // Only read by the signal handler; annotated NO_THREAD_SAFETY_ANALYSIS to prevent lock order
+ // violations with the unexpected_signal_lock_.
+ const std::string& GetFaultMessage() NO_THREAD_SAFETY_ANALYSIS {
+ return fault_message_;
+ }
+
private:
static void InitPlatformSignalHandlers();
@@ -520,6 +466,10 @@ class Runtime {
mirror::ObjectArray<mirror::ArtMethod>* default_imt_;
+ // Fault message, printed when we get a SIGSEGV.
+ Mutex fault_message_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ std::string fault_message_ GUARDED_BY(fault_message_lock_);
+
// Method verifier set, used so that we can update their GC roots.
Mutex method_verifiers_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
std::set<verifier::MethodVerifier*> method_verifiers_;
@@ -555,9 +505,9 @@ class Runtime {
// Runtime profile support.
bool profile_;
std::string profile_output_filename_;
- uint32_t profile_period_s_; // Generate profile every n seconds.
- uint32_t profile_duration_s_; // Run profile for n seconds.
- uint32_t profile_interval_us_; // Microseconds between samples.
+ uint32_t profile_period_s_; // Generate profile every n seconds.
+ uint32_t profile_duration_s_; // Run profile for n seconds.
+ uint32_t profile_interval_us_; // Microseconds between samples.
double profile_backoff_coefficient_; // Coefficient to exponential backoff.
bool method_trace_;
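
The GUARDED_BY and NO_THREAD_SAFETY_ANALYSIS macros used for fault_message_ are, under Clang, thin wrappers around the -Wthread-safety attributes (and compile to nothing elsewhere). A minimal sketch of what the annotations buy, assuming that attribute expansion (the Lock and FaultMessageHolder classes are illustrative):

    #include <mutex>

    // A capability-annotated lock wrapper; Clang's -Wthread-safety then checks
    // that guarded fields are only touched while the lock is held.
    class __attribute__((capability("mutex"))) Lock {
     public:
      void Acquire() __attribute__((acquire_capability())) { mu_.lock(); }
      void Release() __attribute__((release_capability())) { mu_.unlock(); }
     private:
      std::mutex mu_;
    };

    class FaultMessageHolder {
     public:
      void Set(const char* m) {
        lock_.Acquire();
        message_ = m;  // OK: the compiler sees the capability is held
        lock_.Release();
      }
      // A lock-free read needs an explicit opt-out, mirroring the
      // NO_THREAD_SAFETY_ANALYSIS on GetFaultMessage() above.
      const char* UnsafeGet() __attribute__((no_thread_safety_analysis)) {
        return message_;
      }
     private:
      Lock lock_;
      const char* message_ __attribute__((guarded_by(lock_))) = "";
    };
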
diff --git a/runtime/runtime_android.cc b/runtime/runtime_android.cc
index 2013294dfb..a2377229da 100644
--- a/runtime/runtime_android.cc
+++ b/runtime/runtime_android.cc
@@ -14,12 +14,60 @@
* limitations under the License.
*/
-#include "runtime.h"
+#include <signal.h>
+#include <string.h>
+#include <sys/utsname.h>
+#include <inttypes.h>
+
+#include "base/logging.h"
+#include "base/mutex.h"
+#include "base/stringprintf.h"
+#include "thread-inl.h"
+#include "utils.h"
namespace art {
+static constexpr bool kDumpHeapObjectOnSigsegv = false;
+
+struct sigaction old_action;
+void HandleUnexpectedSignal(int signal_number, siginfo_t* info, void* raw_context) {
+ static bool handlingUnexpectedSignal = false;
+ if (handlingUnexpectedSignal) {
+ LogMessageData data(__FILE__, __LINE__, INTERNAL_FATAL, -1);
+ LogMessage::LogLine(data, "HandleUnexpectedSignal reentered\n");
+ _exit(1);
+ }
+ handlingUnexpectedSignal = true;
+ gAborting++; // set before taking any locks
+ MutexLock mu(Thread::Current(), *Locks::unexpected_signal_lock_);
+
+ Runtime* runtime = Runtime::Current();
+ if (runtime != nullptr) {
+ // Print this out first in case DumpObject faults.
+ LOG(INTERNAL_FATAL) << "Fault message: " << runtime->GetFaultMessage();
+ gc::Heap* heap = runtime->GetHeap();
+ if (kDumpHeapObjectOnSigsegv && heap != nullptr && info != nullptr) {
+ LOG(INTERNAL_FATAL) << "Dump heap object at fault address: ";
+ heap->DumpObject(LOG(INTERNAL_FATAL), reinterpret_cast<mirror::Object*>(info->si_addr));
+ }
+ }
+ // Run the old signal handler.
+ old_action.sa_sigaction(signal_number, info, raw_context);
+}
+
void Runtime::InitPlatformSignalHandlers() {
- // On a device, debuggerd will give us a stack trace. Nothing to do here.
+ // Install our own SIGSEGV handler so the fault message is logged before chaining to debuggerd's handler.
+ struct sigaction action;
+ memset(&action, 0, sizeof(action));
+ sigemptyset(&action.sa_mask);
+ action.sa_sigaction = HandleUnexpectedSignal;
+ // Use the three-argument sa_sigaction handler.
+ action.sa_flags |= SA_SIGINFO;
+ // Use the alternate signal stack so we can catch stack overflows.
+ action.sa_flags |= SA_ONSTACK;
+ int rc = 0;
+ rc += sigaction(SIGSEGV, &action, &old_action);
+ CHECK_EQ(rc, 0);
}
} // namespace art
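
One subtlety in the handler above: forwarding through old_action.sa_sigaction is only well-defined if the previous handler was installed with SA_SIGINFO (true for debuggerd, which this code assumes). A defensive chaining sketch for the general case (names illustrative):

    #include <csignal>

    static struct sigaction previous_action;

    // Sketch: the saved disposition may be a plain sa_handler, SIG_DFL, or
    // SIG_IGN, so check SA_SIGINFO before using the three-argument form.
    static void ChainToPreviousHandler(int sig, siginfo_t* info, void* ctx) {
      if (previous_action.sa_flags & SA_SIGINFO) {
        previous_action.sa_sigaction(sig, info, ctx);
      } else if (previous_action.sa_handler == SIG_DFL) {
        // Restore the default disposition and re-raise so the kernel performs
        // its normal termination/core-dump handling.
        sigaction(sig, &previous_action, nullptr);
        raise(sig);
      } else if (previous_action.sa_handler != SIG_IGN) {
        previous_action.sa_handler(sig);
      }
    }
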
diff --git a/runtime/runtime_linux.cc b/runtime/runtime_linux.cc
index 73ac034633..da1b2cac14 100644
--- a/runtime/runtime_linux.cc
+++ b/runtime/runtime_linux.cc
@@ -24,11 +24,13 @@
#include "base/logging.h"
#include "base/mutex.h"
#include "base/stringprintf.h"
-#include "thread.h"
+#include "thread-inl.h"
#include "utils.h"
namespace art {
+static constexpr bool kDumpHeapObjectOnSigsegv = false;
+
struct Backtrace {
void Dump(std::ostream& os) {
DumpNativeStack(os, GetTid(), "\t", true);
@@ -305,7 +307,15 @@ void HandleUnexpectedSignal(int signal_number, siginfo_t* info, void* raw_contex
<< "Thread: " << tid << " \"" << thread_name << "\"\n"
<< "Registers:\n" << Dumpable<UContext>(thread_context) << "\n"
<< "Backtrace:\n" << Dumpable<Backtrace>(thread_backtrace);
-
+ Runtime* runtime = Runtime::Current();
+ if (runtime != nullptr) {
+ gc::Heap* heap = runtime->GetHeap();
+ LOG(INTERNAL_FATAL) << "Fault message: " << runtime->GetFaultMessage();
+ if (kDumpHeapObjectOnSigsegv && heap != nullptr && info != nullptr) {
+ LOG(INTERNAL_FATAL) << "Dump heap object at fault address: ";
+ heap->DumpObject(LOG(INTERNAL_FATAL), reinterpret_cast<mirror::Object*>(info->si_addr));
+ }
+ }
if (getenv("debug_db_uid") != NULL || getenv("art_wait_for_gdb_on_crash") != NULL) {
LOG(INTERNAL_FATAL) << "********************************************************\n"
<< "* Process " << getpid() << " thread " << tid << " \"" << thread_name << "\""
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 4fe9169f9b..f843ae5714 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -40,6 +40,7 @@
#include "debugger.h"
#include "dex_file-inl.h"
#include "entrypoints/entrypoint_utils.h"
+#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "gc_map.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/heap.h"
@@ -109,8 +110,6 @@ void Thread::InitTlsEntryPoints() {
&quick_entrypoints_);
}
-void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints);
-
void Thread::ResetQuickAllocEntryPointsForThread() {
ResetQuickAllocEntryPoints(&quick_entrypoints_);
}
@@ -1752,6 +1751,7 @@ static const EntryPointInfo gThreadEntryPointInfo[] = {
QUICK_ENTRY_POINT_INFO(pThrowNoSuchMethod),
QUICK_ENTRY_POINT_INFO(pThrowNullPointer),
QUICK_ENTRY_POINT_INFO(pThrowStackOverflow),
+ QUICK_ENTRY_POINT_INFO(pQuickGenericJniTrampoline),
};
#undef QUICK_ENTRY_POINT_INFO
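
gThreadEntryPointInfo pairs each entrypoint with a printable name via a stringizing macro, which is why the new trampoline costs exactly one table line. A pared-down sketch of that pattern (hypothetical struct and macro, not ART's exact definitions):

    #include <cstddef>
    #include <cstdio>

    struct EntryPointInfo {
      size_t offset;
      const char* name;
    };

    struct QuickEntryPoints {
      void* pAllocObject;
      void* pQuickGenericJniTrampoline;
    };

    // One macro application yields both the field offset and its name.
    #define QUICK_ENTRY_POINT_INFO(x) { offsetof(QuickEntryPoints, x), #x }
    static const EntryPointInfo gInfo[] = {
      QUICK_ENTRY_POINT_INFO(pAllocObject),
      QUICK_ENTRY_POINT_INFO(pQuickGenericJniTrampoline),
    };
    #undef QUICK_ENTRY_POINT_INFO

    int main() {
      for (const EntryPointInfo& e : gInfo) {
        printf("%s at offset %zu\n", e.name, e.offset);
      }
      return 0;
    }
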
diff --git a/runtime/thread.h b/runtime/thread.h
index f9d31af25d..6df2b1c994 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -42,6 +42,12 @@
namespace art {
+namespace gc {
+namespace collector {
+class SemiSpace;
+} // namespace collector
+} // namespace gc
+
namespace mirror {
class ArtMethod;
class Array;
@@ -61,7 +67,7 @@ class Closure;
class Context;
struct DebugInvokeReq;
class DexFile;
-struct JavaVMExt;
+class JavaVMExt;
struct JNIEnvExt;
class Monitor;
class Runtime;
@@ -851,6 +857,7 @@ class PACKED(4) Thread {
private:
friend class Dbg; // For SetStateUnsafe.
+ friend class gc::collector::SemiSpace; // For getting stack traces.
friend class Monitor;
friend class MonitorInfo;
friend class Runtime; // For CreatePeer.
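
The hunk above has to reopen the gc and collector namespaces just to forward-declare SemiSpace, because C++ offers no qualified forward declaration. A compact illustration:

    // There is no "class gc::collector::SemiSpace;" shorthand: the enclosing
    // namespaces must be reopened around the declaration before the name can
    // appear in a friend declaration.
    namespace gc {
    namespace collector {
    class SemiSpace;
    }  // namespace collector
    }  // namespace gc

    class Thread {
     private:
      friend class gc::collector::SemiSpace;  // may read private stack state
      int stack_state_ = 0;
    };
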
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 18185d4ba6..1f2447871a 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -115,11 +115,7 @@ static const uint16_t kTraceVersionDualClock = 3;
static const uint16_t kTraceRecordSizeSingleClock = 10; // using v2
static const uint16_t kTraceRecordSizeDualClock = 14; // using v3 with two timestamps
-#if defined(HAVE_POSIX_CLOCKS)
-ProfilerClockSource Trace::default_clock_source_ = kProfilerClockSourceDual;
-#else
-ProfilerClockSource Trace::default_clock_source_ = kProfilerClockSourceWall;
-#endif
+ProfilerClockSource Trace::default_clock_source_ = kDefaultProfilerClockSource;
Trace* volatile Trace::the_trace_ = NULL;
pthread_t Trace::sampling_pthread_ = 0U;
diff --git a/runtime/trace.h b/runtime/trace.h
index d810df05fe..1af12831e7 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -42,6 +42,12 @@ enum ProfilerClockSource {
kProfilerClockSourceDual, // Both wall and thread CPU clocks.
};
+#if defined(HAVE_POSIX_CLOCKS)
+const ProfilerClockSource kDefaultProfilerClockSource = kProfilerClockSourceDual;
+#else
+const ProfilerClockSource kDefaultProfilerClockSource = kProfilerClockSourceWall;
+#endif
+
enum TracingMode {
kTracingInactive,
kMethodTracingActive,
diff --git a/runtime/utils.cc b/runtime/utils.cc
index df1ab9446a..d8f8f8f0a3 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -1232,8 +1232,8 @@ bool Exec(std::vector<std::string>& arg_vector, std::string* error_msg) {
execv(program, &args[0]);
- PLOG(FATAL) << "Failed to execv(" << command_line << ")";
- return false;
+ PLOG(ERROR) << "Failed to execv(" << command_line << ")";
+ exit(1);
} else {
if (pid == -1) {
*error_msg = StringPrintf("Failed to execv(%s) because fork failed: %s",
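
The Exec() change above is about what a forked child may do after execv fails: PLOG(FATAL) would abort() inside the child and trip the unexpected-signal machinery, while a plain exit lets the parent observe the failure through waitpid. A sketch of that division of labor (hypothetical RunChild helper; _exit is used here, a common refinement that also skips atexit handlers inherited from the parent):

    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>
    #include <cstdio>

    // Sketch: the child reports execv failure only via its exit status; all
    // logging and error reporting stay in the parent.
    bool RunChild(char* const argv[]) {
      pid_t pid = fork();
      if (pid == 0) {
        execv(argv[0], argv);
        perror("execv");  // reached only if execv failed
        _exit(1);
      }
      if (pid == -1) {
        return false;  // fork failed
      }
      int status = 0;
      return waitpid(pid, &status, 0) == pid &&
             WIFEXITED(status) && WEXITSTATUS(status) == 0;
    }
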
diff --git a/runtime/zip_archive_test.cc b/runtime/zip_archive_test.cc
index 0bf6767e7d..c43fee55f2 100644
--- a/runtime/zip_archive_test.cc
+++ b/runtime/zip_archive_test.cc
@@ -32,10 +32,10 @@ class ZipArchiveTest : public CommonRuntimeTest {};
TEST_F(ZipArchiveTest, FindAndExtract) {
std::string error_msg;
UniquePtr<ZipArchive> zip_archive(ZipArchive::Open(GetLibCoreDexFileName().c_str(), &error_msg));
- ASSERT_TRUE(zip_archive.get() != false) << error_msg;
+ ASSERT_TRUE(zip_archive.get() != nullptr) << error_msg;
ASSERT_TRUE(error_msg.empty());
UniquePtr<ZipEntry> zip_entry(zip_archive->Find("classes.dex", &error_msg));
- ASSERT_TRUE(zip_entry.get() != false);
+ ASSERT_TRUE(zip_entry.get() != nullptr);
ASSERT_TRUE(error_msg.empty());
ScratchFile tmp;
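
The test fix above replaces a comparison that only compiled because `false`, as a zero-valued integral constant, could historically serve as a null pointer constant; it behaved like a null check but read like a bool comparison, and newer compilers warn on or reject it. A small demonstration:

    #include <cassert>
    #include <memory>

    int main() {
      std::unique_ptr<int> p(new int(42));
      // assert(p.get() != false);  // old spelling: pointer compared to bool;
      //                            // accepted by older compilers, misleading
      assert(p.get() != nullptr);   // the intended check, as in the fix above
      return 0;
    }
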
diff --git a/test/083-compiler-regressions/expected.txt b/test/083-compiler-regressions/expected.txt
index 57065ef01d..541f0f8dec 100644
--- a/test/083-compiler-regressions/expected.txt
+++ b/test/083-compiler-regressions/expected.txt
@@ -4,6 +4,7 @@ b2487514 passes
b5884080 passes
largeFrame passes
largeFrameFloat passes
+mulBy1Test passes
getterSetterTest passes
identityTest passes
wideGetterSetterTest passes
diff --git a/test/083-compiler-regressions/src/Main.java b/test/083-compiler-regressions/src/Main.java
index 6829388d2a..81f5b14af0 100644
--- a/test/083-compiler-regressions/src/Main.java
+++ b/test/083-compiler-regressions/src/Main.java
@@ -35,6 +35,7 @@ public class Main {
b5884080Test();
largeFrameTest();
largeFrameTestFloat();
+ mulBy1Test();
getterSetterTest();
identityTest();
wideGetterSetterTest();
@@ -161,6 +162,19 @@ public class Main {
}
}
+ static void mulBy1Test() {
+ long res;
+ long j = 1;
+ res = 1 * j + j;
+ if (res == 2L) {
+ System.out.println("mulBy1Test passes");
+ }
+ else {
+ System.out.println("mulBy1Test fails: " + res +
+ " (expecting 2)");
+ }
+ }
+
static void b2296099Test() throws Exception {
int x = -1190771042;
int dist = 360530809;
diff --git a/tools/generate-operator-out.py b/tools/generate-operator-out.py
index 19266b4f64..56b8674942 100755
--- a/tools/generate-operator-out.py
+++ b/tools/generate-operator-out.py
@@ -23,7 +23,7 @@ import string
import sys
-_ENUM_START_RE = re.compile(r'\benum\b\s+(\S+)\s+\{')
+_ENUM_START_RE = re.compile(r'\benum\b\s+(\S+)\s+:?.*\{')
_ENUM_VALUE_RE = re.compile(r'([A-Za-z0-9_]+)(.*)')
_ENUM_END_RE = re.compile(r'^\s*\};$')
_ENUMS = {}
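
The widened regex exists because C++11 lets an enum specify its underlying type, and `enum <name> : <type> {` never matched the old pattern. Declarations illustrating what the script must now recognize (names are illustrative):

    #include <cstdint>

    enum OldStyle {               // matched before and after the regex change
      kOldA,
      kOldB,
    };

    enum TypedStatus : uint8_t {  // matched only by the new ":?.*" pattern
      kStatusOk,
      kStatusError,
    };
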