-rw-r--r--  build/Android.common_build.mk | 3
-rw-r--r--  build/Android.common_test.mk | 8
-rw-r--r--  build/Android.gtest.mk | 6
-rw-r--r--  compiler/compiled_method.cc | 28
-rw-r--r--  compiler/compiled_method.h | 100
-rw-r--r--  compiler/dex/compiler_enums.h | 1
-rw-r--r--  compiler/dex/global_value_numbering.cc | 7
-rw-r--r--  compiler/dex/global_value_numbering.h | 2
-rw-r--r--  compiler/dex/local_value_numbering.cc | 41
-rw-r--r--  compiler/dex/local_value_numbering.h | 2
-rw-r--r--  compiler/dex/mir_graph.cc | 1
-rw-r--r--  compiler/dex/mir_optimization.cc | 1
-rw-r--r--  compiler/dex/quick/arm/fp_arm.cc | 11
-rw-r--r--  compiler/dex/quick/arm64/arm64_lir.h | 35
-rw-r--r--  compiler/dex/quick/arm64/assemble_arm64.cc | 108
-rw-r--r--  compiler/dex/quick/arm64/call_arm64.cc | 4
-rw-r--r--  compiler/dex/quick/arm64/int_arm64.cc | 12
-rw-r--r--  compiler/dex/quick/arm64/target_arm64.cc | 3
-rw-r--r--  compiler/dex/quick/codegen_util.cc | 15
-rw-r--r--  compiler/dex/quick/mips/int_mips.cc | 4
-rw-r--r--  compiler/dex/quick/mir_to_lir.h | 2
-rw-r--r--  compiler/dex/quick/x86/assemble_x86.cc | 22
-rw-r--r--  compiler/dex/quick/x86/codegen_x86.h | 35
-rwxr-xr-x  compiler/dex/quick/x86/int_x86.cc | 159
-rwxr-xr-x  compiler/dex/quick/x86/target_x86.cc | 52
-rw-r--r--  compiler/dex/quick/x86/x86_lir.h | 3
-rw-r--r--  compiler/dex/ssa_transformation.cc | 1
-rw-r--r--  compiler/driver/compiler_driver.cc | 13
-rw-r--r--  compiler/driver/compiler_driver.h | 18
-rw-r--r--  compiler/elf_fixup.cc | 28
-rw-r--r--  compiler/elf_writer_quick.cc | 906
-rw-r--r--  compiler/elf_writer_quick.h | 27
-rw-r--r--  compiler/image_writer.cc | 2
-rw-r--r--  compiler/jni/jni_compiler_test.cc | 307
-rw-r--r--  compiler/oat_writer.cc | 9
-rw-r--r--  compiler/oat_writer.h | 8
-rw-r--r--  compiler/optimizing/code_generator.cc | 10
-rw-r--r--  compiler/optimizing/code_generator.h | 4
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc | 6
-rw-r--r--  compiler/optimizing/register_allocator.cc | 1
-rw-r--r--  compiler/optimizing/ssa_liveness_analysis.cc | 1
-rw-r--r--  compiler/utils/arena_bit_vector.cc | 5
-rw-r--r--  compiler/utils/arena_bit_vector.h | 20
-rw-r--r--  compiler/utils/dwarf_cfi.cc | 82
-rw-r--r--  compiler/utils/dwarf_cfi.h | 10
-rw-r--r--  compiler/utils/x86/assembler_x86.cc | 6
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.cc | 6
-rw-r--r--  disassembler/disassembler_x86.cc | 12
-rw-r--r--  patchoat/patchoat.cc | 21
-rw-r--r--  runtime/Android.mk | 4
-rw-r--r--  runtime/arch/arm/arm_sdiv.S | 2
-rw-r--r--  runtime/arch/arm/asm_support_arm.S | 27
-rw-r--r--  runtime/arch/arm/portable_entrypoints_arm.S | 2
-rw-r--r--  runtime/arch/arm/quick_entrypoints_arm.S | 37
-rw-r--r--  runtime/arch/arm64/asm_support_arm64.S | 16
-rw-r--r--  runtime/arch/arm64/portable_entrypoints_arm64.S | 2
-rw-r--r--  runtime/arch/arm64/quick_entrypoints_arm64.S | 4
-rw-r--r--  runtime/arch/memcmp16.cc | 12
-rw-r--r--  runtime/arch/memcmp16.h | 13
-rw-r--r--  runtime/arch/memcmp16_test.cc | 2
-rw-r--r--  runtime/arch/mips/asm_support_mips.h | 4
-rw-r--r--  runtime/arch/stub_test.cc | 190
-rw-r--r--  runtime/arch/x86/asm_support_x86.S | 12
-rw-r--r--  runtime/arch/x86/fault_handler_x86.cc | 4
-rw-r--r--  runtime/arch/x86/portable_entrypoints_x86.S | 2
-rw-r--r--  runtime/arch/x86/quick_entrypoints_x86.S | 95
-rw-r--r--  runtime/arch/x86_64/asm_support_x86_64.S | 35
-rw-r--r--  runtime/arch/x86_64/portable_entrypoints_x86_64.S | 2
-rw-r--r--  runtime/arch/x86_64/quick_entrypoints_x86_64.S | 72
-rw-r--r--  runtime/base/allocator.cc | 4
-rw-r--r--  runtime/base/bit_vector-inl.h | 80
-rw-r--r--  runtime/base/bit_vector.cc | 211
-rw-r--r--  runtime/base/bit_vector.h | 406
-rw-r--r--  runtime/base/bit_vector_test.cc | 3
-rw-r--r--  runtime/check_jni.cc | 10
-rw-r--r--  runtime/class_linker.cc | 353
-rw-r--r--  runtime/class_linker.h | 21
-rw-r--r--  runtime/common_runtime_test.cc | 13
-rw-r--r--  runtime/common_runtime_test.h | 12
-rw-r--r--  runtime/debugger.cc | 5
-rw-r--r--  runtime/elf_file.cc | 314
-rw-r--r--  runtime/elf_file.h | 2
-rw-r--r--  runtime/entrypoints/entrypoint_utils.cc | 2
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc | 10
-rw-r--r--  runtime/gc/accounting/card_table-inl.h | 2
-rw-r--r--  runtime/gc/accounting/mod_union_table.cc | 55
-rw-r--r--  runtime/gc/accounting/mod_union_table.h | 7
-rw-r--r--  runtime/gc/heap-inl.h | 4
-rw-r--r--  runtime/gc/heap.cc | 41
-rw-r--r--  runtime/gc/heap.h | 12
-rw-r--r--  runtime/instruction_set.cc | 13
-rw-r--r--  runtime/instruction_set.h | 2
-rw-r--r--  runtime/instruction_set_test.cc | 1
-rw-r--r--  runtime/instrumentation.cc | 171
-rw-r--r--  runtime/instrumentation.h | 12
-rw-r--r--  runtime/java_vm_ext.cc | 8
-rw-r--r--  runtime/jdwp/jdwp_main.cc | 9
-rw-r--r--  runtime/jni_internal.cc | 1
-rw-r--r--  runtime/jni_internal_test.cc | 15
-rw-r--r--  runtime/leb128.h | 32
-rw-r--r--  runtime/method_helper.h | 5
-rw-r--r--  runtime/mirror/art_method.cc | 20
-rw-r--r--  runtime/mirror/art_method.h | 5
-rw-r--r--  runtime/mirror/object.cc | 3
-rw-r--r--  runtime/monitor.cc | 96
-rw-r--r--  runtime/monitor.h | 35
-rw-r--r--  runtime/native/dalvik_system_DexFile.cc | 15
-rw-r--r--  runtime/native/dalvik_system_VMStack.cc | 8
-rw-r--r--  runtime/native_bridge.cc | 267
-rw-r--r--  runtime/native_bridge.h | 39
-rw-r--r--  runtime/native_bridge_art_interface.cc | 94
-rw-r--r--  runtime/native_bridge_art_interface.h | 34
-rw-r--r--  runtime/oat_file.cc | 59
-rw-r--r--  runtime/oat_file.h | 49
-rw-r--r--  runtime/parsed_options.cc | 10
-rw-r--r--  runtime/parsed_options.h | 2
-rw-r--r--  runtime/proxy_test.cc | 10
-rw-r--r--  runtime/quick_exception_handler.cc | 22
-rw-r--r--  runtime/quick_exception_handler.h | 8
-rw-r--r--  runtime/runtime.cc | 18
-rw-r--r--  runtime/runtime.h | 20
-rw-r--r--  runtime/stack.h | 4
-rw-r--r--  runtime/thread.cc | 31
-rw-r--r--  runtime/thread.h | 10
-rw-r--r--  runtime/utils.cc | 19
-rw-r--r--  runtime/verifier/method_verifier.cc | 6
-rw-r--r--  runtime/zip_archive.cc | 2
-rw-r--r--  test/004-JniTest/jni_test.cc | 232
-rw-r--r--  test/004-JniTest/src/Main.java | 14
-rw-r--r--  test/080-oom-throw/src/Main.java | 3
-rw-r--r--  test/115-native-bridge/nativebridge.cc | 28
-rwxr-xr-x  test/116-nodex2oat/run | 3
-rw-r--r--  test/117-nopatchoat/expected.txt | 9
-rw-r--r--  test/117-nopatchoat/info.txt | 1
-rw-r--r--  test/117-nopatchoat/nopatchoat.cc | 41
-rwxr-xr-x  test/117-nopatchoat/run | 36
-rw-r--r--  test/117-nopatchoat/src/Main.java | 48
-rw-r--r--  test/702-LargeBranchOffset/build | 27
-rw-r--r--  test/702-LargeBranchOffset/expected.txt | 5
-rw-r--r--  test/702-LargeBranchOffset/info.txt | 1
-rw-r--r--  test/702-LargeBranchOffset/src/Main.java.in | 47
-rw-r--r--  test/Android.libarttest.mk | 3
-rw-r--r--  test/Android.run-test.mk | 36
-rw-r--r--  test/MyClassNatives/MyClassNatives.java | 12
-rwxr-xr-x  test/run-test | 11
145 files changed, 3925 insertions(+), 2059 deletions(-)
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index d55f310384..0dcefead38 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -157,7 +157,8 @@ art_cflags := \
-Wno-sign-promo \
-Wno-unused-parameter \
-Wstrict-aliasing \
- -fstrict-aliasing
+ -fstrict-aliasing \
+ -fvisibility=protected
ART_TARGET_CLANG_CFLAGS :=
ART_TARGET_CLANG_CFLAGS_arm :=
diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk
index 52d1ee31a5..e2f39496bd 100644
--- a/build/Android.common_test.mk
+++ b/build/Android.common_test.mk
@@ -33,7 +33,13 @@ ART_TEST_KNOWN_BROKEN := \
test-art-target-run-test-gcstress-optimizing-relocate-004-SignalTest32 \
test-art-target-run-test-gcstress-default-relocate-004-SignalTest32 \
test-art-target-run-test-gcstress-optimizing-no-prebuild-004-SignalTest32 \
- test-art-target-run-test-gcstress-default-no-prebuild-004-SignalTest32
+ test-art-target-run-test-gcstress-default-no-prebuild-004-SignalTest32 \
+ test-art-host-run-test-gcstress-default-prebuild-114-ParallelGC32 \
+ test-art-host-run-test-gcstress-interpreter-prebuild-114-ParallelGC32 \
+ test-art-host-run-test-gcstress-optimizing-prebuild-114-ParallelGC32 \
+ test-art-host-run-test-gcstress-default-prebuild-114-ParallelGC64 \
+ test-art-host-run-test-gcstress-interpreter-prebuild-114-ParallelGC64 \
+ test-art-host-run-test-gcstress-optimizing-prebuild-114-ParallelGC64
# List of known failing tests that, when executed, won't prevent test execution from finishing.
# The test name must be the full rule name such as test-art-host-oat-optimizing-HelloWorld64.
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index dd9f414032..0579f9bb77 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -61,8 +61,8 @@ ART_GTEST_transaction_test_DEX_DEPS := Transaction
# The elf writer test has dependencies on core.oat.
ART_GTEST_elf_writer_test_HOST_DEPS := $(HOST_CORE_OAT_OUT) $(2ND_HOST_CORE_OAT_OUT)
ART_GTEST_elf_writer_test_TARGET_DEPS := $(TARGET_CORE_OAT_OUT) $(2ND_TARGET_CORE_OAT_OUT)
-ART_GTEST_jni_internal_test_TARGET_DEPS := $(TARGET_CORE_JARS)
-ART_GTEST_proxy_test_TARGET_DEPS := $(TARGET_CORE_JARS)
+ART_GTEST_jni_internal_test_TARGET_DEPS := $(TARGET_CORE_DEX_FILES)
+ART_GTEST_proxy_test_TARGET_DEPS := $(TARGET_CORE_DEX_FILES)
ART_GTEST_proxy_test_HOST_DEPS := $(HOST_CORE_OAT_OUT) $(2ND_HOST_CORE_OAT_OUT)
# The path to which all the source files are relative, not actually the current directory.
@@ -117,7 +117,6 @@ RUNTIME_GTEST_COMMON_SRC_FILES := \
runtime/monitor_pool_test.cc \
runtime/monitor_test.cc \
runtime/parsed_options_test.cc \
- runtime/proxy_test.cc \
runtime/reference_table_test.cc \
runtime/thread_pool_test.cc \
runtime/transaction_test.cc \
@@ -128,6 +127,7 @@ RUNTIME_GTEST_COMMON_SRC_FILES := \
COMPILER_GTEST_COMMON_SRC_FILES := \
runtime/jni_internal_test.cc \
+ runtime/proxy_test.cc \
runtime/reflection_test.cc \
compiler/dex/global_value_numbering_test.cc \
compiler/dex/local_value_numbering_test.cc \
diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc
index f2a8d84731..f9a78be6e8 100644
--- a/compiler/compiled_method.cc
+++ b/compiler/compiled_method.cc
@@ -148,16 +148,18 @@ CompiledMethod::CompiledMethod(CompilerDriver* driver,
const size_t frame_size_in_bytes,
const uint32_t core_spill_mask,
const uint32_t fp_spill_mask,
+ SrcMap* src_mapping_table,
const std::vector<uint8_t>& mapping_table,
const std::vector<uint8_t>& vmap_table,
const std::vector<uint8_t>& native_gc_map,
const std::vector<uint8_t>* cfi_info)
: CompiledCode(driver, instruction_set, quick_code), frame_size_in_bytes_(frame_size_in_bytes),
core_spill_mask_(core_spill_mask), fp_spill_mask_(fp_spill_mask),
- mapping_table_(driver->DeduplicateMappingTable(mapping_table)),
- vmap_table_(driver->DeduplicateVMapTable(vmap_table)),
- gc_map_(driver->DeduplicateGCMap(native_gc_map)),
- cfi_info_(driver->DeduplicateCFIInfo(cfi_info)) {
+ src_mapping_table_(driver->DeduplicateSrcMappingTable(src_mapping_table->Arrange())),
+ mapping_table_(driver->DeduplicateMappingTable(mapping_table)),
+ vmap_table_(driver->DeduplicateVMapTable(vmap_table)),
+ gc_map_(driver->DeduplicateGCMap(native_gc_map)),
+ cfi_info_(driver->DeduplicateCFIInfo(cfi_info)) {
}
CompiledMethod::CompiledMethod(CompilerDriver* driver,
@@ -170,6 +172,7 @@ CompiledMethod::CompiledMethod(CompilerDriver* driver,
: CompiledCode(driver, instruction_set, code),
frame_size_in_bytes_(frame_size_in_bytes),
core_spill_mask_(core_spill_mask), fp_spill_mask_(fp_spill_mask),
+ src_mapping_table_(driver->DeduplicateSrcMappingTable(SrcMap())),
mapping_table_(driver->DeduplicateMappingTable(std::vector<uint8_t>())),
vmap_table_(driver->DeduplicateVMapTable(std::vector<uint8_t>())),
gc_map_(driver->DeduplicateGCMap(std::vector<uint8_t>())),
@@ -182,19 +185,22 @@ CompiledMethod::CompiledMethod(CompilerDriver* driver, InstructionSet instructio
const std::string& symbol)
: CompiledCode(driver, instruction_set, code, symbol),
frame_size_in_bytes_(kStackAlignment), core_spill_mask_(0),
- fp_spill_mask_(0), gc_map_(driver->DeduplicateGCMap(gc_map)) {
- mapping_table_ = driver->DeduplicateMappingTable(std::vector<uint8_t>());
- vmap_table_ = driver->DeduplicateVMapTable(std::vector<uint8_t>());
+ fp_spill_mask_(0),
+ src_mapping_table_(driver->DeduplicateSrcMappingTable(SrcMap())),
+ mapping_table_(driver->DeduplicateMappingTable(std::vector<uint8_t>())),
+ vmap_table_(driver->DeduplicateVMapTable(std::vector<uint8_t>())),
+ gc_map_(driver->DeduplicateGCMap(gc_map)) {
}
CompiledMethod::CompiledMethod(CompilerDriver* driver, InstructionSet instruction_set,
const std::string& code, const std::string& symbol)
: CompiledCode(driver, instruction_set, code, symbol),
frame_size_in_bytes_(kStackAlignment), core_spill_mask_(0),
- fp_spill_mask_(0) {
- mapping_table_ = driver->DeduplicateMappingTable(std::vector<uint8_t>());
- vmap_table_ = driver->DeduplicateVMapTable(std::vector<uint8_t>());
- gc_map_ = driver->DeduplicateGCMap(std::vector<uint8_t>());
+ fp_spill_mask_(0),
+ src_mapping_table_(driver->DeduplicateSrcMappingTable(SrcMap())),
+ mapping_table_(driver->DeduplicateMappingTable(std::vector<uint8_t>())),
+ vmap_table_(driver->DeduplicateVMapTable(std::vector<uint8_t>())),
+ gc_map_(driver->DeduplicateGCMap(std::vector<uint8_t>())) {
}
} // namespace art
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index c98d06a01d..d02cbff7fc 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -100,7 +100,97 @@ class CompiledCode {
std::vector<uint32_t> oatdata_offsets_to_compiled_code_offset_;
};
-class CompiledMethod : public CompiledCode {
+class SrcMapElem {
+ public:
+ uint32_t from_;
+ int32_t to_;
+
+ bool operator<(const SrcMapElem& sme) const {
+ uint64_t lhs = (static_cast<uint64_t>(from_) << 32) + to_;
+ uint64_t rhs = (static_cast<uint64_t>(sme.from_) << 32) + sme.to_;
+ return lhs < rhs;
+ }
+
+ operator uint8_t() const {
+ return static_cast<uint8_t>(from_ + to_);
+ }
+};
+
+class SrcMap FINAL : public std::vector<SrcMapElem> {
+ public:
+ struct CompareByTo {
+ bool operator()(const SrcMapElem& lhs, const SrcMapElem& rhs) {
+ return lhs.to_ < rhs.to_;
+ }
+ };
+
+ struct CompareByFrom {
+ bool operator()(const SrcMapElem& lhs, const SrcMapElem& rhs) {
+ return lhs.from_ < rhs.from_;
+ }
+ };
+
+ void SortByTo() {
+ std::sort(begin(), end(), CompareByTo());
+ }
+
+ void SortByFrom() {
+ std::sort(begin(), end(), CompareByFrom());
+ }
+
+ const_iterator FindByTo(int32_t to) const {
+ return std::lower_bound(begin(), end(), SrcMapElem({0, to}), CompareByTo());
+ }
+
+ SrcMap& Arrange() {
+ SortByTo();
+
+ // Remove duplicate pairs.
+ if (!empty()) {
+ SrcMap tmp;
+ tmp.swap(*this);
+ iterator it = tmp.begin();
+ iterator prev = it;
+ it++;
+ push_back(*prev);
+ for (; it != tmp.end(); it++) {
+ if (prev->from_ != it->from_ || prev->to_ != it->to_) {
+ push_back(*(prev = it));
+ }
+ }
+ }
+ return *this;
+ }
+
+ void DeltaFormat(const SrcMapElem& start, uint32_t highest_pc) {
+ // Convert from abs values to deltas.
+ if (!empty()) {
+ SortByFrom();
+
+ // TODO: one PC can be mapped to several Java src lines.
+ // do we want such a one-to-many correspondence?
+
+ // get rid of the highest values
+ size_t i = size() - 1;
+ for (; i > 0 ; i--) {
+ if ((*this)[i].from_ < highest_pc) {
+ break;
+ }
+ }
+ this->resize(i + 1);
+
+ for (size_t i = size(); --i >= 1; ) {
+ (*this)[i].from_ -= (*this)[i-1].from_;
+ (*this)[i].to_ -= (*this)[i-1].to_;
+ }
+ DCHECK((*this)[0].from_ >= start.from_);
+ (*this)[0].from_ -= start.from_;
+ (*this)[0].to_ -= start.to_;
+ }
+ }
+};
+
+class CompiledMethod FINAL : public CompiledCode {
public:
// Constructs a CompiledMethod for the non-LLVM compilers.
CompiledMethod(CompilerDriver* driver,
@@ -109,6 +199,7 @@ class CompiledMethod : public CompiledCode {
const size_t frame_size_in_bytes,
const uint32_t core_spill_mask,
const uint32_t fp_spill_mask,
+ SrcMap* src_mapping_table,
const std::vector<uint8_t>& mapping_table,
const std::vector<uint8_t>& vmap_table,
const std::vector<uint8_t>& native_gc_map,
@@ -145,6 +236,11 @@ class CompiledMethod : public CompiledCode {
return fp_spill_mask_;
}
+ const SrcMap& GetSrcMappingTable() const {
+ DCHECK(src_mapping_table_ != nullptr);
+ return *src_mapping_table_;
+ }
+
const std::vector<uint8_t>& GetMappingTable() const {
DCHECK(mapping_table_ != nullptr);
return *mapping_table_;
@@ -171,6 +267,8 @@ class CompiledMethod : public CompiledCode {
const uint32_t core_spill_mask_;
// For quick code, a bit mask describing spilled FPR callee-save registers.
const uint32_t fp_spill_mask_;
+ // For quick code, a set of pairs (PC, Line) mapping from native PC offset to Java line.
+ SrcMap* src_mapping_table_;
// For quick code, a uleb128 encoded map from native PC offset to dex PC as well as dex PC to
// native PC offset. Size prefixed.
std::vector<uint8_t>* mapping_table_;
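The SrcMap added above holds (native PC, Java line) pairs that Arrange() sorts and deduplicates, and DeltaFormat() later converts to deltas so each entry stays small under a variable-length encoding. A minimal standalone sketch of that pipeline, with std::vector and illustrative names standing in for the real class:

```cpp
#include <algorithm>
#include <cstdint>
#include <vector>

struct Elem { uint32_t from; int32_t to; };  // (native PC offset, Java line)

// Sketch of Arrange(): sort, then drop duplicate pairs.
void Arrange(std::vector<Elem>& m) {
  std::sort(m.begin(), m.end(), [](const Elem& a, const Elem& b) {
    return a.to < b.to || (a.to == b.to && a.from < b.from);
  });
  m.erase(std::unique(m.begin(), m.end(),
                      [](const Elem& a, const Elem& b) {
                        return a.from == b.from && a.to == b.to;
                      }),
          m.end());
}

// Sketch of DeltaFormat(): sort by PC and keep only the difference from the
// previous entry (and from 'start' for the first one).
void DeltaFormat(std::vector<Elem>& m, Elem start) {
  std::sort(m.begin(), m.end(), [](const Elem& a, const Elem& b) { return a.from < b.from; });
  for (size_t i = m.size(); i-- > 1;) {
    m[i].from -= m[i - 1].from;
    m[i].to -= m[i - 1].to;
  }
  if (!m.empty()) {
    m[0].from -= start.from;
    m[0].to -= start.to;
  }
}
```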
diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h
index dcc67c3986..63f3e640df 100644
--- a/compiler/dex/compiler_enums.h
+++ b/compiler/dex/compiler_enums.h
@@ -528,6 +528,7 @@ enum FixupKind {
kFixupLoad, // Mostly for immediates.
kFixupVLoad, // FP load which *may* be pc-relative.
kFixupCBxZ, // Cbz, Cbnz.
+ kFixupTBxZ, // Tbz, Tbnz.
kFixupPushPop, // Not really pc relative, but changes size based on args.
kFixupCondBranch, // Conditional branch
kFixupT1Branch, // Thumb1 Unconditional branch
diff --git a/compiler/dex/global_value_numbering.cc b/compiler/dex/global_value_numbering.cc
index d7ef6f0984..3575adeac5 100644
--- a/compiler/dex/global_value_numbering.cc
+++ b/compiler/dex/global_value_numbering.cc
@@ -56,8 +56,11 @@ LocalValueNumbering* GlobalValueNumbering::PrepareBasicBlock(BasicBlock* bb,
return nullptr;
}
if (UNLIKELY(bbs_processed_ == max_bbs_to_process_)) {
- last_value_ = kNoValue; // Make bad.
- return nullptr;
+ // If we're still trying to converge, stop now. Otherwise, proceed to apply optimizations.
+ if (!modifications_allowed_) {
+ last_value_ = kNoValue; // Make bad.
+ return nullptr;
+ }
}
if (allocator == nullptr) {
allocator = allocator_;
diff --git a/compiler/dex/global_value_numbering.h b/compiler/dex/global_value_numbering.h
index c06ff6f172..1a38692eea 100644
--- a/compiler/dex/global_value_numbering.h
+++ b/compiler/dex/global_value_numbering.h
@@ -214,7 +214,7 @@ class GlobalValueNumbering {
static constexpr uint32_t kMaxBbsToProcessMultiplyFactor = 20u;
uint32_t bbs_processed_;
- uint32_t max_bbs_to_process_;
+ uint32_t max_bbs_to_process_; // Doesn't apply after the main GVN has converged.
// We have 32-bit last_value_ so that we can detect when we run out of value names, see Good().
// We usually don't check Good() until the end of LVN unless we're about to modify code.
diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc
index 5997568216..8b02269998 100644
--- a/compiler/dex/local_value_numbering.cc
+++ b/compiler/dex/local_value_numbering.cc
@@ -656,13 +656,37 @@ void LocalValueNumbering::MergeEscapedArrayClobberSets(
}
}
-void LocalValueNumbering::MergeNullChecked(const ValueNameSet::value_type& entry,
- ValueNameSet::iterator hint) {
- // Merge null_checked_ for this ref.
- merge_names_.clear();
- merge_names_.resize(gvn_->merge_lvns_.size(), entry);
- if (gvn_->NullCheckedInAllPredecessors(merge_names_)) {
- null_checked_.insert(hint, entry);
+void LocalValueNumbering::MergeNullChecked() {
+ DCHECK_GE(gvn_->merge_lvns_.size(), 2u);
+
+ // Find the LVN with the least entries in the set.
+ const LocalValueNumbering* least_entries_lvn = gvn_->merge_lvns_[0];
+ for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
+ if (lvn->null_checked_.size() < least_entries_lvn->null_checked_.size()) {
+ least_entries_lvn = lvn;
+ }
+ }
+
+ // For each null-checked value name check if it's null-checked in all the LVNs.
+ for (const auto& value_name : least_entries_lvn->null_checked_) {
+ // Merge null_checked_ for this ref.
+ merge_names_.clear();
+ merge_names_.resize(gvn_->merge_lvns_.size(), value_name);
+ if (gvn_->NullCheckedInAllPredecessors(merge_names_)) {
+ null_checked_.insert(null_checked_.end(), value_name);
+ }
+ }
+
+ // Now check if the least_entries_lvn has a null-check as the last insn.
+ const BasicBlock* least_entries_bb = gvn_->GetBasicBlock(least_entries_lvn->Id());
+ if (gvn_->HasNullCheckLastInsn(least_entries_bb, id_)) {
+ int s_reg = least_entries_bb->last_mir_insn->ssa_rep->uses[0];
+ uint32_t value_name = least_entries_lvn->GetSRegValueName(s_reg);
+ merge_names_.clear();
+ merge_names_.resize(gvn_->merge_lvns_.size(), value_name);
+ if (gvn_->NullCheckedInAllPredecessors(merge_names_)) {
+ null_checked_.insert(value_name);
+ }
}
}
@@ -896,8 +920,7 @@ void LocalValueNumbering::Merge(MergeType merge_type) {
IntersectSets<RangeCheckSet, &LocalValueNumbering::range_checked_>();
// Merge null_checked_. We may later insert more, such as merged object field values.
- MergeSets<ValueNameSet, &LocalValueNumbering::null_checked_,
- &LocalValueNumbering::MergeNullChecked>();
+ MergeNullChecked();
if (merge_type == kCatchMerge) {
// Memory is clobbered. New memory version already created, don't merge aliasing locations.
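The rewritten MergeNullChecked() computes a set intersection across all predecessor LVNs, and scanning the smallest set bounds the work by that set's size, since every survivor must appear in it anyway. A hedged sketch of the strategy, with std::set standing in for ValueNameSet:

```cpp
#include <cstdint>
#include <set>
#include <vector>

std::set<uint32_t> IntersectAll(const std::vector<const std::set<uint32_t>*>& sets) {
  // Pick the smallest set as the iteration source.
  const std::set<uint32_t>* smallest = sets[0];
  for (const auto* s : sets) {
    if (s->size() < smallest->size()) smallest = s;
  }
  std::set<uint32_t> result;
  for (uint32_t v : *smallest) {
    bool in_all = true;
    for (const auto* s : sets) {
      if (s->count(v) == 0) { in_all = false; break; }
    }
    if (in_all) result.insert(result.end(), v);  // hint: values arrive in sorted order
  }
  return result;
}
```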
diff --git a/compiler/dex/local_value_numbering.h b/compiler/dex/local_value_numbering.h
index 855d66de8b..f6a454bb1d 100644
--- a/compiler/dex/local_value_numbering.h
+++ b/compiler/dex/local_value_numbering.h
@@ -343,11 +343,11 @@ class LocalValueNumbering {
EscapedIFieldClobberSet::iterator hint);
void MergeEscapedArrayClobberSets(const EscapedArrayClobberSet::value_type& entry,
EscapedArrayClobberSet::iterator hint);
- void MergeNullChecked(const ValueNameSet::value_type& entry, ValueNameSet::iterator hint);
void MergeSFieldValues(const SFieldToValueMap::value_type& entry,
SFieldToValueMap::iterator hint);
void MergeNonAliasingIFieldValues(const IFieldLocToValueMap::value_type& entry,
IFieldLocToValueMap::iterator hint);
+ void MergeNullChecked();
template <typename Map, Map LocalValueNumbering::*map_ptr, typename Versions>
void MergeAliasingValues(const typename Map::value_type& entry, typename Map::iterator hint);
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index dee1361a50..16b529a5e6 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -19,6 +19,7 @@
#include <inttypes.h>
#include <queue>
+#include "base/bit_vector-inl.h"
#include "base/stl_util.h"
#include "compiler_internals.h"
#include "dex_file-inl.h"
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index c7dd85c9c2..6658848570 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "base/bit_vector-inl.h"
#include "compiler_internals.h"
#include "global_value_numbering.h"
#include "local_value_numbering.h"
diff --git a/compiler/dex/quick/arm/fp_arm.cc b/compiler/dex/quick/arm/fp_arm.cc
index 2ad11da964..3eb7c83c11 100644
--- a/compiler/dex/quick/arm/fp_arm.cc
+++ b/compiler/dex/quick/arm/fp_arm.cc
@@ -383,8 +383,17 @@ bool ArmMir2Lir::GenInlinedAbsDouble(CallInfo* info) {
RegLocation rl_result = EvalLoc(rl_dest, reg_class, true);
if (reg_class == kFPReg) {
NewLIR2(kThumb2Vabsd, rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ } else if (rl_result.reg.GetLow().GetReg() != rl_src.reg.GetHigh().GetReg()) {
+ // No inconvenient overlap.
+ OpRegCopy(rl_result.reg.GetLow(), rl_src.reg.GetLow());
+ OpRegRegImm(kOpAnd, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), 0x7fffffff);
} else {
- OpRegImm(kOpAnd, rl_result.reg.GetHigh(), 0x7fffffff);
+ // Inconvenient overlap, use a temp register to preserve the high word of the source.
+ RegStorage rs_tmp = AllocTemp();
+ OpRegCopy(rs_tmp, rl_src.reg.GetHigh());
+ OpRegCopy(rl_result.reg.GetLow(), rl_src.reg.GetLow());
+ OpRegRegImm(kOpAnd, rl_result.reg.GetHigh(), rs_tmp, 0x7fffffff);
+ FreeTemp(rs_tmp);
}
StoreValueWide(rl_dest, rl_result);
return true;
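The new branch exists because the result and source register pairs may overlap: if the result's low register is the source's high register, copying the low word first would destroy the high word before it is masked. A tiny illustration of the hazard with plain pointers (hypothetical, not ART code):

```cpp
#include <cstdint>

// abs() on a double's bit pattern: copy the low word, clear the sign bit of the high word.
// If result_lo aliases src_hi, the high word must be saved first - the rs_tmp copy above.
void AbsDoubleBits(uint32_t* result_lo, uint32_t* result_hi,
                   const uint32_t* src_lo, const uint32_t* src_hi) {
  uint32_t saved_hi = *src_hi;          // OpRegCopy(rs_tmp, rl_src.reg.GetHigh())
  *result_lo = *src_lo;                 // may clobber *src_hi when they alias
  *result_hi = saved_hi & 0x7fffffff;   // OpRegRegImm(kOpAnd, ..., 0x7fffffff)
}
```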
diff --git a/compiler/dex/quick/arm64/arm64_lir.h b/compiler/dex/quick/arm64/arm64_lir.h
index a449cbd4f7..d001dd652a 100644
--- a/compiler/dex/quick/arm64/arm64_lir.h
+++ b/compiler/dex/quick/arm64/arm64_lir.h
@@ -116,6 +116,7 @@ enum Arm64ResourceEncodingPos {
#define IS_SIGNED_IMM7(value) IS_SIGNED_IMM(7, value)
#define IS_SIGNED_IMM9(value) IS_SIGNED_IMM(9, value)
#define IS_SIGNED_IMM12(value) IS_SIGNED_IMM(12, value)
+#define IS_SIGNED_IMM14(value) IS_SIGNED_IMM(14, value)
#define IS_SIGNED_IMM19(value) IS_SIGNED_IMM(19, value)
#define IS_SIGNED_IMM21(value) IS_SIGNED_IMM(21, value)
@@ -355,7 +356,10 @@ enum ArmOpcode {
kA64Sub4rrro, // sub [s1001011000] rm[20-16] imm_6[15-10] rn[9-5] rd[4-0].
kA64Sub4RRre, // sub [s1001011001] rm[20-16] option[15-13] imm_3[12-10] rn[9-5] rd[4-0].
kA64Subs3rRd, // subs[s111000100] imm_12[21-10] rn[9-5] rd[4-0].
+ kA64Tst2rl, // tst alias of "ands rzr, rn, #imm".
kA64Tst3rro, // tst alias of "ands rzr, arg1, arg2, arg3".
+ kA64Tbnz3rht, // tbnz imm_6_b5[31] [0110111] imm_6_b40[23-19] imm_14[18-5] rt[4-0].
+ kA64Tbz3rht, // tbz imm_6_b5[31] [0110110] imm_6_b40[23-19] imm_14[18-5] rt[4-0].
kA64Ubfm4rrdd, // ubfm[s10100110] N[22] imm_r[21-16] imm_s[15-10] rn[9-5] rd[4-0].
kA64Last,
kA64NotWide = 0, // Flag used to select the first instruction variant.
@@ -400,23 +404,24 @@ enum ArmOpDmbOptions {
enum ArmEncodingKind {
// All the formats below are encoded in the same way (as a kFmtBitBlt).
// These are grouped together, for fast handling (e.g. "if (LIKELY(fmt <= kFmtBitBlt)) ...").
- kFmtRegW = 0, // Word register (w) or wzr.
- kFmtRegX, // Extended word register (x) or xzr.
- kFmtRegR, // Register with same width as the instruction or zr.
- kFmtRegWOrSp, // Word register (w) or wsp.
- kFmtRegXOrSp, // Extended word register (x) or sp.
- kFmtRegROrSp, // Register with same width as the instruction or sp.
- kFmtRegS, // Single FP reg.
- kFmtRegD, // Double FP reg.
- kFmtRegF, // Single/double FP reg depending on the instruction width.
- kFmtBitBlt, // Bit string using end/start.
+ kFmtRegW = 0, // Word register (w) or wzr.
+ kFmtRegX, // Extended word register (x) or xzr.
+ kFmtRegR, // Register with same width as the instruction or zr.
+ kFmtRegWOrSp, // Word register (w) or wsp.
+ kFmtRegXOrSp, // Extended word register (x) or sp.
+ kFmtRegROrSp, // Register with same width as the instruction or sp.
+ kFmtRegS, // Single FP reg.
+ kFmtRegD, // Double FP reg.
+ kFmtRegF, // Single/double FP reg depending on the instruction width.
+ kFmtBitBlt, // Bit string using end/start.
// Less likely formats.
- kFmtUnused, // Unused field and marks end of formats.
- kFmtImm21, // Sign-extended immediate using [23..5,30..29].
- kFmtShift, // Register shift, 9-bit at [23..21, 15..10]..
- kFmtExtend, // Register extend, 9-bit at [23..21, 15..10].
- kFmtSkip, // Unused field, but continue to next.
+ kFmtUnused, // Unused field and marks end of formats.
+ kFmtImm6Shift, // Shift immediate, 6-bit at [31, 23..19].
+ kFmtImm21, // Sign-extended immediate using [23..5,30..29].
+ kFmtShift, // Register shift, 9-bit at [23..21, 15..10].
+ kFmtExtend, // Register extend, 9-bit at [23..21, 15..10].
+ kFmtSkip, // Unused field, but continue to next.
};
// Struct used to define the snippet positions for each A64 opcode.
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index 15c89f2f18..5115246fc8 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -89,6 +89,7 @@ namespace art {
* M -> 16-bit shift expression ("" or ", lsl #16" or ", lsl #32"...)
* B -> dmb option string (sy, st, ish, ishst, nsh, nshst)
* H -> operand shift
+ * h -> 6-bit shift immediate
* T -> register shift (either ", lsl #0" or ", lsl #12")
* e -> register extend (e.g. uxtb #1)
* o -> register shift (e.g. lsl #1) for Word registers
@@ -614,10 +615,24 @@ const ArmEncodingMap Arm64Mir2Lir::EncodingMap[kA64Last] = {
kFmtRegR, 4, 0, kFmtRegROrSp, 9, 5, kFmtBitBlt, 21, 10,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
"subs", "!0r, !1R, #!2d", kFixupNone),
- ENCODING_MAP(WIDE(kA64Tst3rro), SF_VARIANTS(0x6a000000),
+ ENCODING_MAP(WIDE(kA64Tst2rl), SF_VARIANTS(0x7200001f),
+ kFmtRegR, 9, 5, kFmtBitBlt, 22, 10, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE0 | SETS_CCODES,
+ "tst", "!0r, !1l", kFixupNone),
+ ENCODING_MAP(WIDE(kA64Tst3rro), SF_VARIANTS(0x6a00001f),
kFmtRegR, 9, 5, kFmtRegR, 20, 16, kFmtShift, -1, -1,
- kFmtUnused, -1, -1, IS_QUAD_OP | REG_USE01 | SETS_CCODES,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | SETS_CCODES,
"tst", "!0r, !1r!2o", kFixupNone),
+ // NOTE: Tbz/Tbnz do not require SETS_CCODES, but they may be replaced by other LIRs
+ // which require SETS_CCODES in the fix-up stage.
+ ENCODING_MAP(WIDE(kA64Tbnz3rht), CUSTOM_VARIANTS(0x37000000, 0x37000000),
+ kFmtRegR, 4, 0, kFmtImm6Shift, -1, -1, kFmtBitBlt, 18, 5, kFmtUnused, -1, -1,
+ IS_TERTIARY_OP | REG_USE0 | IS_BRANCH | NEEDS_FIXUP | SETS_CCODES,
+ "tbnz", "!0r, #!1h, !2t", kFixupTBxZ),
+ ENCODING_MAP(WIDE(kA64Tbz3rht), CUSTOM_VARIANTS(0x36000000, 0x36000000),
+ kFmtRegR, 4, 0, kFmtImm6Shift, -1, -1, kFmtBitBlt, 18, 5, kFmtUnused, -1, -1,
+ IS_TERTIARY_OP | REG_USE0 | IS_BRANCH | NEEDS_FIXUP | SETS_CCODES,
+ "tbz", "!0r, #!1h, !2t", kFixupTBxZ),
ENCODING_MAP(WIDE(kA64Ubfm4rrdd), SF_N_VARIANTS(0x53000000),
kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtBitBlt, 21, 16,
kFmtBitBlt, 15, 10, IS_QUAD_OP | REG_DEF0_USE1,
@@ -787,6 +802,11 @@ uint8_t* Arm64Mir2Lir::EncodeLIRs(uint8_t* write_pos, LIR* lir) {
value |= ((operand & 0x1ffffc) >> 2) << 5;
bits |= value;
break;
+ case kFmtImm6Shift:
+ value = (operand & 0x1f) << 19;
+ value |= ((operand & 0x20) >> 5) << 31;
+ bits |= value;
+ break;
default:
LOG(FATAL) << "Bad fmt for arg. " << i << " in " << encoder->name
<< " (" << kind << ")";
@@ -827,11 +847,6 @@ void Arm64Mir2Lir::AssembleLIR() {
*/
int generation = 0;
while (true) {
- // TODO(Arm64): check whether passes and offset adjustments are really necessary.
- // Currently they aren't, as - in the fixups below - LIR are never inserted.
- // Things can be different if jump ranges above 1 MB need to be supported.
- // If they are not, then we can get rid of the assembler retry logic.
-
offset_adjustment = 0;
AssemblerStatus res = kSuccess; // Assume success
generation ^= 1;
@@ -839,13 +854,9 @@ void Arm64Mir2Lir::AssembleLIR() {
lir = first_fixup_;
prev_lir = NULL;
while (lir != NULL) {
- /*
- * NOTE: the lir being considered here will be encoded following the switch (so long as
- * we're not in a retry situation). However, any new non-pc_rel instructions inserted
- * due to retry must be explicitly encoded at the time of insertion. Note that
- * inserted instructions don't need use/def flags, but do need size and pc-rel status
- * properly updated.
- */
+ // NOTE: Any new non-pc_rel instructions inserted due to retry must be explicitly encoded at
+ // the time of insertion. Note that inserted instructions don't need use/def flags, but do
+ // need size and pc-rel status properly updated.
lir->offset += offset_adjustment;
// During pass, allows us to tell whether a node has been updated with offset_adjustment yet.
lir->flags.generation = generation;
@@ -861,7 +872,8 @@ void Arm64Mir2Lir::AssembleLIR() {
CodeOffset target = target_lir->offset +
((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
int32_t delta = target - pc;
- if (!((delta & 0x3) == 0 && IS_SIGNED_IMM19(delta >> 2))) {
+ DCHECK_EQ(delta & 0x3, 0);
+ if (!IS_SIGNED_IMM19(delta >> 2)) {
LOG(FATAL) << "Invalid jump range in kFixupT1Branch";
}
lir->operands[0] = delta >> 2;
@@ -876,12 +888,75 @@ void Arm64Mir2Lir::AssembleLIR() {
CodeOffset target = target_lir->offset +
((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
int32_t delta = target - pc;
- if (!((delta & 0x3) == 0 && IS_SIGNED_IMM19(delta >> 2))) {
+ DCHECK_EQ(delta & 0x3, 0);
+ if (!IS_SIGNED_IMM19(delta >> 2)) {
LOG(FATAL) << "Invalid jump range in kFixupLoad";
}
lir->operands[1] = delta >> 2;
break;
}
+ case kFixupTBxZ: {
+ int16_t opcode = lir->opcode;
+ RegStorage reg(lir->operands[0] | RegStorage::kValid);
+ int32_t imm = lir->operands[1];
+ DCHECK_EQ(IS_WIDE(opcode), reg.Is64Bit());
+ DCHECK_LT(imm, 64);
+ if (imm >= 32) {
+ DCHECK(IS_WIDE(opcode));
+ } else if (kIsDebugBuild && IS_WIDE(opcode)) {
+ // "tbz/tbnz x0, #imm(<32)" is the same with "tbz/tbnz w0, #imm(<32)", but GCC/oatdump
+ // will disassemble it as "tbz/tbnz w0, #imm(<32)". So unwide the LIR to make the
+ // compiler log behave the same with those disassembler in debug build.
+ // This will also affect tst instruction if it need to be replaced, but there is no
+ // performance difference between "tst Xt" and "tst Wt".
+ lir->opcode = UNWIDE(opcode);
+ lir->operands[0] = As32BitReg(reg).GetReg();
+ }
+
+ // Fix-up branch offset.
+ LIR *target_lir = lir->target;
+ DCHECK(target_lir);
+ CodeOffset pc = lir->offset;
+ CodeOffset target = target_lir->offset +
+ ((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
+ int32_t delta = target - pc;
+ DCHECK_EQ(delta & 0x3, 0);
+ // Check if branch offset can be encoded in tbz/tbnz.
+ if (!IS_SIGNED_IMM14(delta >> 2)) {
+ DexOffset dalvik_offset = lir->dalvik_offset;
+ int16_t opcode = lir->opcode;
+ LIR* target = lir->target;
+ // "tbz/tbnz Rt, #imm, label" -> "tst Rt, #(1<<imm)".
+ offset_adjustment -= lir->flags.size;
+ int32_t imm = EncodeLogicalImmediate(IS_WIDE(opcode), 1 << lir->operands[1]);
+ DCHECK_NE(imm, -1);
+ lir->opcode = IS_WIDE(opcode) ? WIDE(kA64Tst2rl) : kA64Tst2rl;
+ lir->operands[1] = imm;
+ lir->target = nullptr;
+ lir->flags.fixup = EncodingMap[kA64Tst2rl].fixup;
+ lir->flags.size = EncodingMap[kA64Tst2rl].size;
+ offset_adjustment += lir->flags.size;
+ // Insert "beq/bneq label".
+ opcode = UNWIDE(opcode);
+ DCHECK(opcode == kA64Tbz3rht || opcode == kA64Tbnz3rht);
+ LIR* new_lir = RawLIR(dalvik_offset, kA64B2ct,
+ opcode == kA64Tbz3rht ? kArmCondEq : kArmCondNe, 0, 0, 0, 0, target);
+ InsertLIRAfter(lir, new_lir);
+ new_lir->offset = lir->offset + lir->flags.size;
+ new_lir->flags.generation = generation;
+ new_lir->flags.fixup = EncodingMap[kA64B2ct].fixup;
+ new_lir->flags.size = EncodingMap[kA64B2ct].size;
+ offset_adjustment += new_lir->flags.size;
+ // lir no longer pcrel, unlink and link in new_lir.
+ ReplaceFixup(prev_lir, lir, new_lir);
+ prev_lir = new_lir; // Continue with the new instruction.
+ lir = new_lir->u.a.pcrel_next;
+ res = kRetryAll;
+ continue;
+ }
+ lir->operands[2] = delta >> 2;
+ break;
+ }
case kFixupAdr: {
LIR* target_lir = lir->target;
int32_t delta;
@@ -910,6 +985,7 @@ void Arm64Mir2Lir::AssembleLIR() {
}
if (res == kSuccess) {
+ DCHECK_EQ(offset_adjustment, 0);
break;
} else {
assembler_retries++;
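The kFixupTBxZ path above is classic branch relaxation: tbz/tbnz encodes (delta >> 2) in a signed 14-bit field, reaching only about +/-32 KiB, so an out-of-range branch is rewritten as tst plus b.eq/b.ne, whose 19-bit offset reaches about +/-1 MiB. A sketch of the range check that triggers the rewrite:

```cpp
#include <cstdint>

// IS_SIGNED_IMM14(delta >> 2): the scaled offset must fit in a signed 14-bit field,
// i.e. the branch reaches [-0x8000, +0x7ffc] bytes from the instruction.
bool FitsInTbz(int32_t delta_bytes) {
  if ((delta_bytes & 0x3) != 0) return false;      // A64 branch targets are 4-byte aligned
  int32_t imm14 = delta_bytes >> 2;
  return imm14 >= -(1 << 13) && imm14 < (1 << 13);
}
```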
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index 0538c31fb8..eddc3a39be 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -208,9 +208,9 @@ void Arm64Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
OpRegRegImm(kOpAdd, rs_x2, rs_x0, mirror::Object::MonitorOffset().Int32Value());
NewLIR2(kA64Ldxr2rX, rw3, rx2);
MarkPossibleNullPointerException(opt_flags);
- LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_x1, 0, NULL);
+ LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_w3, 0, NULL);
NewLIR3(kA64Stxr3wrX, rw3, rw1, rx2);
- LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_x1, 0, NULL);
+ LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_w3, 0, NULL);
LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
not_unlocked_branch->target = slow_path_target;
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index d00c57dee9..d1b9c81d09 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -271,8 +271,12 @@ LIR* Arm64Mir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_
ArmOpcode opcode = kA64Cbz2rt;
ArmOpcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
branch = NewLIR2(opcode | wide, reg.GetReg(), 0);
+ } else if (arm_cond == kArmCondLt || arm_cond == kArmCondGe) {
+ ArmOpcode opcode = (arm_cond == kArmCondLt) ? kA64Tbnz3rht : kA64Tbz3rht;
+ ArmOpcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
+ int value = reg.Is64Bit() ? 63 : 31;
+ branch = NewLIR3(opcode | wide, reg.GetReg(), value, 0);
}
- // TODO: Use tbz/tbnz for < 0 or >= 0.
}
if (branch == nullptr) {
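The new case works because in two's complement, x < 0 is exactly "sign bit set", so a single tbnz (or tbz for x >= 0) on bit 63 or 31 replaces the compare-and-branch pair. For instance:

```cpp
#include <cstdint>

// "cmp x0, #0; b.lt target" and "tbnz x0, #63, target" take the branch for the same inputs.
bool LtZeroBranchTaken(int64_t x) {
  return (static_cast<uint64_t>(x) >> 63) != 0;  // bit 63 is the sign bit
}
```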
@@ -856,16 +860,14 @@ bool Arm64Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) {
OpRegRegImm(kOpLsl, rs_length, rs_length, 1);
// Copy one element.
- OpRegRegImm(kOpAnd, rs_tmp, As32BitReg(rs_length), 2);
- LIR* jmp_to_copy_two = OpCmpImmBranch(kCondEq, rs_tmp, 0, nullptr);
+ LIR* jmp_to_copy_two = NewLIR3(WIDE(kA64Tbz3rht), rs_length.GetReg(), 1, 0);
OpRegImm(kOpSub, rs_length, 2);
LoadBaseIndexed(rs_src, rs_length, rs_tmp, 0, kSignedHalf);
StoreBaseIndexed(rs_dst, rs_length, rs_tmp, 0, kSignedHalf);
// Copy two elements.
LIR *copy_two = NewLIR0(kPseudoTargetLabel);
- OpRegRegImm(kOpAnd, rs_tmp, As32BitReg(rs_length), 4);
- LIR* jmp_to_copy_four = OpCmpImmBranch(kCondEq, rs_tmp, 0, nullptr);
+ LIR* jmp_to_copy_four = NewLIR3(WIDE(kA64Tbz3rht), rs_length.GetReg(), 2, 0);
OpRegImm(kOpSub, rs_length, 4);
LoadBaseIndexed(rs_src, rs_length, rs_tmp, 0, k32);
StoreBaseIndexed(rs_dst, rs_length, rs_tmp, 0, k32);
diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc
index 9b4546a94b..685f8d5492 100644
--- a/compiler/dex/quick/arm64/target_arm64.cc
+++ b/compiler/dex/quick/arm64/target_arm64.cc
@@ -504,6 +504,9 @@ std::string Arm64Mir2Lir::BuildInsnString(const char* fmt, LIR* lir, unsigned ch
else
strcpy(tbuf, ", DecodeError3");
break;
+ case 'h':
+ snprintf(tbuf, arraysize(tbuf), "%d", operand);
+ break;
default:
strcpy(tbuf, "DecodeError1");
break;
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index be79b63931..ebebe70462 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -15,6 +15,7 @@
*/
#include "dex/compiler_internals.h"
+#include "driver/compiler_options.h"
#include "dex_file-inl.h"
#include "gc_map.h"
#include "gc_map_builder.h"
@@ -648,15 +649,19 @@ bool Mir2Lir::VerifyCatchEntries() {
void Mir2Lir::CreateMappingTables() {
+ bool generate_src_map = cu_->compiler_driver->GetCompilerOptions().GetIncludeDebugSymbols();
+
uint32_t pc2dex_data_size = 0u;
uint32_t pc2dex_entries = 0u;
uint32_t pc2dex_offset = 0u;
uint32_t pc2dex_dalvik_offset = 0u;
+ uint32_t pc2dex_src_entries = 0u;
uint32_t dex2pc_data_size = 0u;
uint32_t dex2pc_entries = 0u;
uint32_t dex2pc_offset = 0u;
uint32_t dex2pc_dalvik_offset = 0u;
for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
+ pc2dex_src_entries++;
if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
pc2dex_entries += 1;
DCHECK(pc2dex_offset <= tgt_lir->offset);
@@ -677,6 +682,10 @@ void Mir2Lir::CreateMappingTables() {
}
}
+ if (generate_src_map) {
+ src_mapping_table_.reserve(pc2dex_src_entries);
+ }
+
uint32_t total_entries = pc2dex_entries + dex2pc_entries;
uint32_t hdr_data_size = UnsignedLeb128Size(total_entries) + UnsignedLeb128Size(pc2dex_entries);
uint32_t data_size = hdr_data_size + pc2dex_data_size + dex2pc_data_size;
@@ -692,6 +701,10 @@ void Mir2Lir::CreateMappingTables() {
dex2pc_offset = 0u;
dex2pc_dalvik_offset = 0u;
for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
+ if (generate_src_map && !tgt_lir->flags.is_nop) {
+ src_mapping_table_.push_back(SrcMapElem({tgt_lir->offset,
+ static_cast<int32_t>(tgt_lir->dalvik_offset)}));
+ }
if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
DCHECK(pc2dex_offset <= tgt_lir->offset);
write_pos = EncodeUnsignedLeb128(write_pos, tgt_lir->offset - pc2dex_offset);
@@ -1088,7 +1101,7 @@ CompiledMethod* Mir2Lir::GetCompiledMethod() {
std::unique_ptr<std::vector<uint8_t>> cfi_info(ReturnFrameDescriptionEntry());
CompiledMethod* result =
new CompiledMethod(cu_->compiler_driver, cu_->instruction_set, code_buffer_, frame_size_,
- core_spill_mask_, fp_spill_mask_, encoded_mapping_table_,
+ core_spill_mask_, fp_spill_mask_, &src_mapping_table_, encoded_mapping_table_,
vmap_encoder.GetData(), native_gc_map_, cfi_info.get());
return result;
}
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index ea5698970b..95c1262cda 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -220,9 +220,9 @@ void MipsMir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, Cond
int dest_reg_class) {
// Implement as a branch-over.
// TODO: Conditional move?
- LoadConstant(rs_dest, false_val); // Favors false.
- LIR* ne_branchover = OpCmpBranch(code, left_op, right_op, NULL);
LoadConstant(rs_dest, true_val);
+ LIR* ne_branchover = OpCmpBranch(code, left_op, right_op, NULL);
+ LoadConstant(rs_dest, false_val);
LIR* target_label = NewLIR0(kPseudoTargetLabel);
ne_branchover->target = target_label;
}
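The reordering fixes the select's branch-over shape: load the value for the taken condition first, then branch over the other load when the comparison holds. The fixed control flow, as a hedged C++ sketch:

```cpp
#include <cstdint>

// Branch-over select: rs_dest ends up holding true_val when (left op right) holds,
// because the conditional branch skips the false_val load.
int32_t SelectConst32(bool cond_holds, int32_t true_val, int32_t false_val) {
  int32_t dest = true_val;   // LoadConstant(rs_dest, true_val)
  if (!cond_holds) {         // OpCmpBranch(...) jumps to target_label when it holds
    dest = false_val;        // LoadConstant(rs_dest, false_val)
  }
  return dest;               // kPseudoTargetLabel
}
```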
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 4b8f794e1e..573bd9143d 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -1715,6 +1715,8 @@ class Mir2Lir : public Backend {
*/
int live_sreg_;
CodeBuffer code_buffer_;
+ // The source mapping table data (pc -> dex). More entries than in encoded_mapping_table_.
+ SrcMap src_mapping_table_;
// The encoding mapping table data (dex -> pc offset and pc offset -> dex) with a size prefix.
std::vector<uint8_t> encoded_mapping_table_;
ArenaVector<uint32_t> core_vmap_table_;
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index 93b799965c..6173163aa6 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -263,8 +263,10 @@ ENCODING_MAP(Cmp, IS_LOAD, 0, 0,
{ kX86Cmc, kNullary, NO_OPERAND, { 0, 0, 0xF5, 0, 0, 0, 0, 0, false }, "Cmc", "" },
{ kX86Shld32RRI, kRegRegImmStore, IS_TERTIARY_OP | REG_DEF0_USE01 | SETS_CCODES, { 0, 0, 0x0F, 0xA4, 0, 0, 0, 1, false }, "Shld32RRI", "!0r,!1r,!2d" },
+ { kX86Shld32RRC, kShiftRegRegCl, IS_TERTIARY_OP | REG_DEF0_USE01 | REG_USEC | SETS_CCODES, { 0, 0, 0x0F, 0xA5, 0, 0, 0, 0, false }, "Shld32RRC", "!0r,!1r,cl" },
{ kX86Shld32MRI, kMemRegImm, IS_QUAD_OP | REG_USE02 | IS_LOAD | IS_STORE | SETS_CCODES, { 0, 0, 0x0F, 0xA4, 0, 0, 0, 1, false }, "Shld32MRI", "[!0r+!1d],!2r,!3d" },
{ kX86Shrd32RRI, kRegRegImmStore, IS_TERTIARY_OP | REG_DEF0_USE01 | SETS_CCODES, { 0, 0, 0x0F, 0xAC, 0, 0, 0, 1, false }, "Shrd32RRI", "!0r,!1r,!2d" },
+ { kX86Shrd32RRC, kShiftRegRegCl, IS_TERTIARY_OP | REG_DEF0_USE01 | REG_USEC | SETS_CCODES, { 0, 0, 0x0F, 0xAD, 0, 0, 0, 0, false }, "Shrd32RRC", "!0r,!1r,cl" },
{ kX86Shrd32MRI, kMemRegImm, IS_QUAD_OP | REG_USE02 | IS_LOAD | IS_STORE | SETS_CCODES, { 0, 0, 0x0F, 0xAC, 0, 0, 0, 1, false }, "Shrd32MRI", "[!0r+!1d],!2r,!3d" },
{ kX86Shld64RRI, kRegRegImmStore, IS_TERTIARY_OP | REG_DEF0_USE01 | SETS_CCODES, { REX_W, 0, 0x0F, 0xA4, 0, 0, 0, 1, false }, "Shld64RRI", "!0r,!1r,!2d" },
{ kX86Shld64MRI, kMemRegImm, IS_QUAD_OP | REG_USE02 | IS_LOAD | IS_STORE | SETS_CCODES, { REX_W, 0, 0x0F, 0xA4, 0, 0, 0, 1, false }, "Shld64MRI", "[!0r+!1d],!2r,!3d" },
@@ -591,6 +593,7 @@ static bool ModrmIsRegReg(const X86EncodingMap* entry) {
case kShiftRegCl: return true;
case kRegCond: return true;
case kRegRegCond: return true;
+ case kShiftRegRegCl: return true;
case kJmp:
switch (entry->opcode) {
case kX86JmpR: return true;
@@ -768,6 +771,9 @@ size_t X86Mir2Lir::GetInsnSize(LIR* lir) {
DCHECK_EQ(rs_rCX.GetRegNum(), RegStorage::RegNum(lir->operands[4]));
return ComputeSize(entry, lir->operands[4], lir->operands[1], lir->operands[0],
lir->operands[3]);
+ case kShiftRegRegCl: // lir operands - 0: reg1, 1: reg2, 2: cl
+ DCHECK_EQ(rs_rCX.GetRegNum(), RegStorage::RegNum(lir->operands[2]));
+ return ComputeSize(entry, lir->operands[0], NO_REG, lir->operands[1], 0);
case kRegCond: // lir operands - 0: reg, 1: cond
return ComputeSize(entry, NO_REG, NO_REG, lir->operands[0], 0);
case kMemCond: // lir operands - 0: base, 1: disp, 2: cond
@@ -1336,6 +1342,19 @@ void X86Mir2Lir::EmitShiftMemCl(const X86EncodingMap* entry, int32_t raw_base,
DCHECK_EQ(0, entry->skeleton.immediate_bytes);
}
+void X86Mir2Lir::EmitShiftRegRegCl(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_reg2, int32_t raw_cl) {
+ DCHECK_EQ(false, entry->skeleton.r8_form);
+ DCHECK_EQ(rs_rCX.GetRegNum(), RegStorage::RegNum(raw_cl));
+ EmitPrefixAndOpcode(entry, raw_reg1, NO_REG, raw_reg2);
+ uint8_t low_reg1 = LowRegisterBits(raw_reg1);
+ uint8_t low_reg2 = LowRegisterBits(raw_reg2);
+ uint8_t modrm = (3 << 6) | (low_reg1 << 3) | low_reg2;
+ code_buffer_.push_back(modrm);
+ DCHECK_EQ(0, entry->skeleton.modrm_opcode);
+ DCHECK_EQ(0, entry->skeleton.ax_opcode);
+ DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+}
+
void X86Mir2Lir::EmitShiftMemImm(const X86EncodingMap* entry, int32_t raw_base, int32_t disp,
int32_t imm) {
DCHECK_EQ(false, entry->skeleton.r8_form);
@@ -1829,6 +1848,9 @@ AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
case kShiftMemCl: // lir operands - 0: base, 1:displacement, 2: cl
EmitShiftMemCl(entry, lir->operands[0], lir->operands[1], lir->operands[2]);
break;
+ case kShiftRegRegCl: // lir operands - 0: reg1, 1: reg2, 2: cl
+ EmitShiftRegRegCl(entry, lir->operands[1], lir->operands[0], lir->operands[2]);
+ break;
case kRegCond: // lir operands - 0: reg, 1: condition
EmitRegCond(entry, lir->operands[0], lir->operands[1]);
break;
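EmitShiftRegRegCl builds a register-direct ModRM byte (mod = 11) with the source register in the reg field and the destination in r/m, matching how SHLD/SHRD r/m32, r32, CL are encoded. A small sketch of the byte layout:

```cpp
#include <cstdint>

// Register-direct ModRM: mod in bits 7:6 (11 here), reg in bits 5:3, r/m in bits 2:0.
constexpr uint8_t ModRM(uint8_t reg, uint8_t rm) {
  return static_cast<uint8_t>((3u << 6) | ((reg & 7u) << 3) | (rm & 7u));
}

// "shld eax, ebx, cl" (0F A5): destination eax in r/m (0), source ebx in reg (3).
static_assert(ModRM(3, 0) == 0xD8, "expected ModRM byte 0xD8");
```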
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 1f5b3500a8..7d1e20ea42 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -457,6 +457,8 @@ class X86Mir2Lir : public Mir2Lir {
void EmitShiftRegImm(const X86EncodingMap* entry, int32_t raw_reg, int32_t imm);
void EmitShiftRegCl(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_cl);
void EmitShiftMemCl(const X86EncodingMap* entry, int32_t raw_base, int32_t disp, int32_t raw_cl);
+ void EmitShiftRegRegCl(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_reg2,
+ int32_t raw_cl);
void EmitShiftMemImm(const X86EncodingMap* entry, int32_t raw_base, int32_t disp, int32_t imm);
void EmitRegCond(const X86EncodingMap* entry, int32_t raw_reg, int32_t cc);
void EmitMemCond(const X86EncodingMap* entry, int32_t raw_base, int32_t disp, int32_t cc);
@@ -478,8 +480,10 @@ class X86Mir2Lir : public Mir2Lir {
void GenConstWide(RegLocation rl_dest, int64_t value);
void GenMultiplyVectorSignedByte(BasicBlock *bb, MIR *mir);
void GenShiftByteVector(BasicBlock *bb, MIR *mir);
- void AndMaskVectorRegister(RegStorage rs_src1, uint32_t m1, uint32_t m2, uint32_t m3, uint32_t m4);
- void MaskVectorRegister(X86OpCode opcode, RegStorage rs_src1, uint32_t m1, uint32_t m2, uint32_t m3, uint32_t m4);
+ void AndMaskVectorRegister(RegStorage rs_src1, uint32_t m1, uint32_t m2, uint32_t m3,
+ uint32_t m4);
+ void MaskVectorRegister(X86OpCode opcode, RegStorage rs_src1, uint32_t m1, uint32_t m2,
+ uint32_t m3, uint32_t m4);
void AppendOpcodeWithConst(X86OpCode opcode, int reg, MIR* mir);
static bool ProvidesFullMemoryBarrier(X86OpCode opcode);
@@ -551,7 +555,8 @@ class X86Mir2Lir : public Mir2Lir {
void GenMoveVector(BasicBlock *bb, MIR *mir);
/*
- * @brief Packed multiply of units in two vector registers: vB = vB .* vC using vA to know the type of the vector.
+ * @brief Packed multiply of units in two vector registers: vB = vB .* vC using vA to know
+ * the type of the vector.
* @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
@@ -561,7 +566,8 @@ class X86Mir2Lir : public Mir2Lir {
void GenMultiplyVector(BasicBlock *bb, MIR *mir);
/*
- * @brief Packed addition of units in two vector registers: vB = vB .+ vC using vA to know the type of the vector.
+ * @brief Packed addition of units in two vector registers: vB = vB .+ vC using vA to know the
+ * type of the vector.
* @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
@@ -571,7 +577,8 @@ class X86Mir2Lir : public Mir2Lir {
void GenAddVector(BasicBlock *bb, MIR *mir);
/*
- * @brief Packed subtraction of units in two vector registers: vB = vB .- vC using vA to know the type of the vector.
+ * @brief Packed subtraction of units in two vector registers: vB = vB .- vC using vA to know the
+ * type of the vector.
* @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
@@ -581,7 +588,8 @@ class X86Mir2Lir : public Mir2Lir {
void GenSubtractVector(BasicBlock *bb, MIR *mir);
/*
- * @brief Packed shift left of units in two vector registers: vB = vB .<< vC using vA to know the type of the vector.
+ * @brief Packed shift left of units in two vector registers: vB = vB .<< vC using vA to know the
+ * type of the vector.
* @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
@@ -591,7 +599,8 @@ class X86Mir2Lir : public Mir2Lir {
void GenShiftLeftVector(BasicBlock *bb, MIR *mir);
/*
- * @brief Packed signed shift right of units in two vector registers: vB = vB .>> vC using vA to know the type of the vector.
+ * @brief Packed signed shift right of units in two vector registers: vB = vB .>> vC using vA to
+ * know the type of the vector.
* @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
@@ -601,7 +610,8 @@ class X86Mir2Lir : public Mir2Lir {
void GenSignedShiftRightVector(BasicBlock *bb, MIR *mir);
/*
- * @brief Packed unsigned shift right of units in two vector registers: vB = vB .>>> vC using vA to know the type of the vector.
+ * @brief Packed unsigned shift right of units in two vector registers: vB = vB .>>> vC using vA
+ * to know the type of the vector.
* @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
@@ -611,7 +621,8 @@ class X86Mir2Lir : public Mir2Lir {
void GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir);
/*
- * @brief Packed bitwise and of units in two vector registers: vB = vB .& vC using vA to know the type of the vector.
+ * @brief Packed bitwise and of units in two vector registers: vB = vB .& vC using vA to know the
+ * type of the vector.
* @note vA: TypeSize
* @note vB: destination and source
* @note vC: source
@@ -619,7 +630,8 @@ class X86Mir2Lir : public Mir2Lir {
void GenAndVector(BasicBlock *bb, MIR *mir);
/*
- * @brief Packed bitwise or of units in two vector registers: vB = vB .| vC using vA to know the type of the vector.
+ * @brief Packed bitwise or of units in two vector registers: vB = vB .| vC using vA to know the
+ * type of the vector.
* @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
@@ -629,7 +641,8 @@ class X86Mir2Lir : public Mir2Lir {
void GenOrVector(BasicBlock *bb, MIR *mir);
/*
- * @brief Packed bitwise xor of units in two vector registers: vB = vB .^ vC using vA to know the type of the vector.
+ * @brief Packed bitwise xor of units in two vector registers: vB = vB .^ vC using vA to know the
+ * type of the vector.
* @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index afa2ae21fe..cc515384d9 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -793,8 +793,115 @@ RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
bool X86Mir2Lir::GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) {
DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
- if (is_long && cu_->instruction_set == kX86) {
- return false;
+ if (is_long && !cu_->target64) {
+ /*
+ * We want to implement the following algorithm
+ * mov eax, low part of arg1
+ * mov edx, high part of arg1
+ * mov ebx, low part of arg2
+ * mov ecx, high part of arg2
+ * mov edi, eax
+ * sub edi, ebx
+ * mov edi, edx
+ * sbb edi, ecx
+ * is_min ? "cmovgel eax, ebx" : "cmovll eax, ebx"
+ * is_min ? "cmovgel edx, ecx" : "cmovll edx, ecx"
+ *
+ * The algorithm above needs 5 registers: a pair for the first operand
+ * (which later will be used as result), a pair for the second operand
+ * and a temp register (e.g. 'edi') for intermediate calculations.
+ * Ideally we have 6 GP caller-save registers in 32-bit mode. They are:
+ * 'eax', 'ebx', 'ecx', 'edx', 'esi' and 'edi'. So there should be
+ * always enough registers to operate on. Practically, there is a pair
+ * of registers 'edi' and 'esi' which holds promoted values and
+ * sometimes should be treated as 'callee save'. If one of the operands
+ * is in the promoted registers then we have enough register to
+ * operate on. Otherwise there is lack of resources and we have to
+ * save 'edi' before calculations and restore after.
+ */
+
+ RegLocation rl_src1 = info->args[0];
+ RegLocation rl_src2 = info->args[2];
+ RegLocation rl_dest = InlineTargetWide(info);
+ int res_vreg, src1_vreg, src2_vreg;
+
+ /*
+ * If the result register is the same as the second element, then we
+ * need to be careful. The reason is that the first copy will
+ * inadvertently clobber the second element with the first one thus
+ * yielding the wrong result. Thus we do a swap in that case.
+ */
+ res_vreg = mir_graph_->SRegToVReg(rl_dest.s_reg_low);
+ src2_vreg = mir_graph_->SRegToVReg(rl_src2.s_reg_low);
+ if (res_vreg == src2_vreg) {
+ std::swap(rl_src1, rl_src2);
+ }
+
+ rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+ RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+
+ // Pick the first integer as min/max.
+ OpRegCopyWide(rl_result.reg, rl_src1.reg);
+
+ /*
+ * If the integers are both in the same register, then there is
+ * nothing else to do because they are equal and we have already
+ * moved one into the result.
+ */
+ src1_vreg = mir_graph_->SRegToVReg(rl_src1.s_reg_low);
+ src2_vreg = mir_graph_->SRegToVReg(rl_src2.s_reg_low);
+ if (src1_vreg == src2_vreg) {
+ StoreValueWide(rl_dest, rl_result);
+ return true;
+ }
+
+ // Free registers to make some room for the second operand.
+ // But don't try to free ourselves or promoted registers.
+ if (res_vreg != src1_vreg &&
+ IsTemp(rl_src1.reg.GetLow()) && IsTemp(rl_src1.reg.GetHigh())) {
+ FreeTemp(rl_src1.reg);
+ }
+ rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+
+ // Do we have a free register for intermediate calculations?
+ RegStorage tmp = AllocTemp(false);
+ if (tmp == RegStorage::InvalidReg()) {
+ /*
+ * No, will use 'edi'.
+ *
+ * As mentioned above we have 4 temporary and 2 promotable
+ * caller-save registers. Therefore, we assume that a free
+ * register can be allocated only if 'esi' and 'edi' are
+ * already used as operands. If number of promotable registers
+ * increases from 2 to 4 then our assumption fails and operand
+ * data is corrupted.
+ * Let's DCHECK it.
+ */
+ DCHECK(IsTemp(rl_src2.reg.GetLow()) &&
+ IsTemp(rl_src2.reg.GetHigh()) &&
+ IsTemp(rl_result.reg.GetLow()) &&
+ IsTemp(rl_result.reg.GetHigh()));
+ tmp = rs_rDI;
+ NewLIR1(kX86Push32R, tmp.GetReg());
+ }
+
+ // Now we are ready to do calculations.
+ OpRegReg(kOpMov, tmp, rl_result.reg.GetLow());
+ OpRegReg(kOpSub, tmp, rl_src2.reg.GetLow());
+ OpRegReg(kOpMov, tmp, rl_result.reg.GetHigh());
+ OpRegReg(kOpSbc, tmp, rl_src2.reg.GetHigh());
+
+ // Pop 'edi' here to break up the dependency chain a bit.
+ if (tmp == rs_rDI) {
+ NewLIR1(kX86Pop32R, tmp.GetReg());
+ }
+
+ // Conditionally move the other integer into the destination register.
+ ConditionCode cc = is_min ? kCondGe : kCondLt;
+ OpCondRegReg(kOpCmov, cc, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
+ OpCondRegReg(kOpCmov, cc, rl_result.reg.GetHigh(), rl_src2.reg.GetHigh());
+ StoreValueWide(rl_dest, rl_result);
+ return true;
}
// Get the two arguments to the invoke and place them in GP registers.
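The inline expansion above is branchless: SUB/SBB produces the flags of a full 64-bit signed compare, and two CMOVcc instructions reading those same flags conditionally replace both halves of the result. The selection it implements, as a hedged C++ sketch:

```cpp
#include <cstdint>

// a arrives pre-copied into the result pair; b is the second operand.
int64_t MinMax64(int64_t a, int64_t b, bool is_min) {
  bool ge = (a >= b);               // flag state after "sub lo, lo; sbb hi, hi"
  bool take_b = is_min ? ge : !ge;  // kCondGe for min, kCondLt for max
  return take_b ? b : a;            // done as two 32-bit cmovs on the halves
}
```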
@@ -3033,7 +3140,53 @@ void X86Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
void X86Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_shift) {
if (!cu_->target64) {
- Mir2Lir::GenShiftOpLong(opcode, rl_dest, rl_src1, rl_shift);
+ // Long shift operations in 32-bit code: use shld or shrd to create a 32-bit register filled
+ // from the other half, then shift the other half. If the shift amount is less than 32 we're
+ // done; otherwise move one register to the other and place zero or sign bits in the other.
+ LIR* branch;
+ FlushAllRegs();
+ LockCallTemps();
+ LoadValueDirectFixed(rl_shift, rs_rCX);
+ RegStorage r_tmp = RegStorage::MakeRegPair(rs_rAX, rs_rDX);
+ LoadValueDirectWideFixed(rl_src1, r_tmp);
+ switch (opcode) {
+ case Instruction::SHL_LONG:
+ case Instruction::SHL_LONG_2ADDR:
+ NewLIR3(kX86Shld32RRC, r_tmp.GetHighReg(), r_tmp.GetLowReg(), rs_rCX.GetReg());
+ NewLIR2(kX86Sal32RC, r_tmp.GetLowReg(), rs_rCX.GetReg());
+ NewLIR2(kX86Test8RI, rs_rCX.GetReg(), 32);
+ branch = NewLIR2(kX86Jcc8, 0, kX86CondZ);
+ OpRegCopy(r_tmp.GetHigh(), r_tmp.GetLow());
+ LoadConstant(r_tmp.GetLow(), 0);
+ branch->target = NewLIR0(kPseudoTargetLabel);
+ break;
+ case Instruction::SHR_LONG:
+ case Instruction::SHR_LONG_2ADDR:
+ NewLIR3(kX86Shrd32RRC, r_tmp.GetLowReg(), r_tmp.GetHighReg(), rs_rCX.GetReg());
+ NewLIR2(kX86Sar32RC, r_tmp.GetHighReg(), rs_rCX.GetReg());
+ NewLIR2(kX86Test8RI, rs_rCX.GetReg(), 32);
+ branch = NewLIR2(kX86Jcc8, 0, kX86CondZ);
+ OpRegCopy(r_tmp.GetLow(), r_tmp.GetHigh());
+ NewLIR2(kX86Sar32RI, r_tmp.GetHighReg(), 31);
+ branch->target = NewLIR0(kPseudoTargetLabel);
+ break;
+ case Instruction::USHR_LONG:
+ case Instruction::USHR_LONG_2ADDR:
+ NewLIR3(kX86Shrd32RRC, r_tmp.GetLowReg(), r_tmp.GetHighReg(),
+ rs_rCX.GetReg());
+ NewLIR2(kX86Shr32RC, r_tmp.GetHighReg(), rs_rCX.GetReg());
+ NewLIR2(kX86Test8RI, rs_rCX.GetReg(), 32);
+ branch = NewLIR2(kX86Jcc8, 0, kX86CondZ);
+ OpRegCopy(r_tmp.GetLow(), r_tmp.GetHigh());
+ LoadConstant(r_tmp.GetHigh(), 0);
+ branch->target = NewLIR0(kPseudoTargetLabel);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected case: " << opcode;
+ return;
+ }
+ RegLocation rl_result = LocCReturnWide();
+ StoreValueWide(rl_dest, rl_result);
return;
}
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 95941e0418..8c6aa5fe50 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -760,10 +760,7 @@ bool X86Mir2Lir::IsUnconditionalBranch(LIR* lir) {
RegisterClass X86Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
// X86_64 can handle any size.
if (cu_->target64) {
- if (size == kReference) {
- return kRefReg;
- }
- return kCoreReg;
+ return RegClassBySize(size);
}
if (UNLIKELY(is_volatile)) {
@@ -1216,7 +1213,7 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
RegLocation rl_obj = info->args[0];
RegLocation rl_char = info->args[1];
RegLocation rl_start; // Note: only present in III flavor of IndexOf.
- // RBX is callee-save register in 64-bit mode.
+ // RBX is promotable in 64-bit mode.
RegStorage rs_tmp = cu_->target64 ? rs_r11 : rs_rBX;
int start_value = -1;
@@ -1236,23 +1233,7 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
// EBX or R11: temporary during execution (depending on mode).
// REP SCASW: search instruction.
- FlushReg(rs_rAX);
- Clobber(rs_rAX);
- LockTemp(rs_rAX);
- FlushReg(rs_rCX);
- Clobber(rs_rCX);
- LockTemp(rs_rCX);
- FlushReg(rs_rDX);
- Clobber(rs_rDX);
- LockTemp(rs_rDX);
- FlushReg(rs_tmp);
- Clobber(rs_tmp);
- LockTemp(rs_tmp);
- if (cu_->target64) {
- FlushReg(rs_rDI);
- Clobber(rs_rDI);
- LockTemp(rs_rDI);
- }
+ FlushAllRegs();
RegLocation rl_return = GetReturn(kCoreReg);
RegLocation rl_dest = InlineTarget(info);
@@ -1294,7 +1275,7 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
MarkPossibleNullPointerException(0);
if (!cu_->target64) {
- // EDI is callee-save register in 32-bit mode.
+ // EDI is promotable in 32-bit mode.
NewLIR1(kX86Push32R, rs_rDI.GetReg());
}
@@ -1332,10 +1313,12 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
if (!cu_->target64 && rl_start.location != kLocPhysReg) {
// Load the start index from stack, remembering that we pushed EDI.
int displacement = SRegOffset(rl_start.s_reg_low) + sizeof(uint32_t);
- {
- ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- Load32Disp(rs_rX86_SP, displacement, rs_rDI);
- }
+ ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+ Load32Disp(rs_rX86_SP, displacement, rs_rDI);
+ // The Dalvik register annotation in LoadBaseIndexedDisp() used the wrong offset. Fix it.
+ DCHECK(!DECODE_ALIAS_INFO_WIDE(last_lir_insn_->flags.alias_info));
+ int reg_id = DECODE_ALIAS_INFO_REG(last_lir_insn_->flags.alias_info) - 1;
+ AnnotateDalvikRegAccess(last_lir_insn_, reg_id, true, false);
} else {
LoadValueDirectFixed(rl_start, rs_rDI);
}
@@ -1399,15 +1382,6 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
}
StoreValue(rl_dest, rl_return);
-
- FreeTemp(rs_rAX);
- FreeTemp(rs_rCX);
- FreeTemp(rs_rDX);
- FreeTemp(rs_tmp);
- if (cu_->target64) {
- FreeTemp(rs_rDI);
- }
-
return true;
}
@@ -1439,8 +1413,8 @@ std::vector<uint8_t>* X86Mir2Lir::ReturnFrameDescriptionEntry() {
// Generate the FDE for the method.
DCHECK_NE(data_offset_, 0U);
- WriteFDEHeader(cfi_info);
- WriteFDEAddressRange(cfi_info, data_offset_);
+ WriteFDEHeader(cfi_info, cu_->target64);
+ WriteFDEAddressRange(cfi_info, data_offset_, cu_->target64);
// The instructions in the FDE.
if (stack_decrement_ != nullptr) {
@@ -1500,7 +1474,7 @@ std::vector<uint8_t>* X86Mir2Lir::ReturnFrameDescriptionEntry() {
}
PadCFI(cfi_info);
- WriteCFILength(cfi_info);
+ WriteCFILength(cfi_info, cu_->target64);
return cfi_info;
}
diff --git a/compiler/dex/quick/x86/x86_lir.h b/compiler/dex/quick/x86/x86_lir.h
index 500c6b84ea..9620cd1296 100644
--- a/compiler/dex/quick/x86/x86_lir.h
+++ b/compiler/dex/quick/x86/x86_lir.h
@@ -484,8 +484,10 @@ enum X86OpCode {
#undef BinaryShiftOpcode
kX86Cmc,
kX86Shld32RRI,
+ kX86Shld32RRC,
kX86Shld32MRI,
kX86Shrd32RRI,
+ kX86Shrd32RRC,
kX86Shrd32MRI,
kX86Shld64RRI,
kX86Shld64MRI,
@@ -675,6 +677,7 @@ enum X86EncodingKind {
kMemRegImm, // MRI instruction kinds.
kShiftRegImm, kShiftMemImm, kShiftArrayImm, // Shift opcode with immediate.
kShiftRegCl, kShiftMemCl, kShiftArrayCl, // Shift opcode with register CL.
+ kShiftRegRegCl,
// kRegRegReg, kRegRegMem, kRegRegArray, // RRR, RRM, RRA instruction kinds.
kRegCond, kMemCond, kArrayCond, // R, M, A instruction kinds following by a condition.
kRegRegCond, // RR instruction kind followed by a condition.
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index e26745ad5e..4a55de6891 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "base/bit_vector-inl.h"
#include "compiler_internals.h"
#include "dataflow_iterator-inl.h"
#include "utils/scoped_arena_containers.h"
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 645fc1c155..3915381bb5 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -354,6 +354,7 @@ CompilerDriver::CompilerDriver(const CompilerOptions* compiler_options,
compiler_get_method_code_addr_(NULL),
support_boot_image_fixup_(instruction_set != kMips),
dedupe_code_("dedupe code"),
+ dedupe_src_mapping_table_("dedupe source mapping table"),
dedupe_mapping_table_("dedupe mapping table"),
dedupe_vmap_table_("dedupe vmap table"),
dedupe_gc_map_("dedupe gc map"),
@@ -390,6 +391,10 @@ std::vector<uint8_t>* CompilerDriver::DeduplicateCode(const std::vector<uint8_t>
return dedupe_code_.Add(Thread::Current(), code);
}
+SrcMap* CompilerDriver::DeduplicateSrcMappingTable(const SrcMap& src_map) {
+ return dedupe_src_mapping_table_.Add(Thread::Current(), src_map);
+}
+
std::vector<uint8_t>* CompilerDriver::DeduplicateMappingTable(const std::vector<uint8_t>& code) {
return dedupe_mapping_table_.Add(Thread::Current(), code);
}
@@ -1524,7 +1529,13 @@ static bool SkipClass(ClassLinker* class_linker, jobject class_loader, const Dex
return true;
}
- return SkipClassCheckClassPath(descriptor, dex_file, dex_files);
+ if (dex_files.size() > 1) {
+ // Multi-dex compilation, only take first class.
+ return SkipClassCheckClassPath(descriptor, dex_file, dex_files);
+ } else {
+ // Single dex, take everything.
+ return false;
+ }
}
// A fast version of SkipClass above if the class pointer is available
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 233c4f887b..d8f318ba97 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -601,6 +601,7 @@ class CompilerDriver {
LOCKS_EXCLUDED(compiled_classes_lock_);
std::vector<uint8_t>* DeduplicateCode(const std::vector<uint8_t>& code);
+ SrcMap* DeduplicateSrcMappingTable(const SrcMap& src_map);
std::vector<uint8_t>* DeduplicateMappingTable(const std::vector<uint8_t>& code);
std::vector<uint8_t>* DeduplicateVMapTable(const std::vector<uint8_t>& code);
std::vector<uint8_t>* DeduplicateGCMap(const std::vector<uint8_t>& code);
@@ -770,14 +771,15 @@ class CompilerDriver {
bool support_boot_image_fixup_;
// DeDuplication data structures, these own the corresponding byte arrays.
+ template <typename ByteArray>
class DedupeHashFunc {
public:
- size_t operator()(const std::vector<uint8_t>& array) const {
+ size_t operator()(const ByteArray& array) const {
// For small arrays compute a hash using every byte.
static const size_t kSmallArrayThreshold = 16;
size_t hash = 0x811c9dc5;
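      // (Illustrative note: 0x811c9dc5 and 16777619 are the 32-bit FNV offset
      // basis and prime, so small arrays effectively get an FNV-1 hash of
      // every byte.)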
if (array.size() <= kSmallArrayThreshold) {
- for (uint8_t b : array) {
+ for (auto b : array) {
hash = (hash * 16777619) ^ b;
}
} else {
@@ -803,11 +805,13 @@ class CompilerDriver {
return hash;
}
};
- DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc, 4> dedupe_code_;
- DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc, 4> dedupe_mapping_table_;
- DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc, 4> dedupe_vmap_table_;
- DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc, 4> dedupe_gc_map_;
- DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc, 4> dedupe_cfi_info_;
+
+ DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc<std::vector<uint8_t>>, 4> dedupe_code_;
+ DedupeSet<SrcMap, size_t, DedupeHashFunc<SrcMap>, 4> dedupe_src_mapping_table_;
+ DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc<std::vector<uint8_t>>, 4> dedupe_mapping_table_;
+ DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc<std::vector<uint8_t>>, 4> dedupe_vmap_table_;
+ DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc<std::vector<uint8_t>>, 4> dedupe_gc_map_;
+ DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc<std::vector<uint8_t>>, 4> dedupe_cfi_info_;
DISALLOW_COPY_AND_ASSIGN(CompilerDriver);
};
diff --git a/compiler/elf_fixup.cc b/compiler/elf_fixup.cc
index 60f76efed0..bbfbc6ece0 100644
--- a/compiler/elf_fixup.cc
+++ b/compiler/elf_fixup.cc
@@ -38,28 +38,32 @@ bool ElfFixup::Fixup(File* file, uintptr_t oat_data_begin) {
Elf32_Off base_address = oat_data_begin - oatdata_address;
if (!FixupDynamic(*elf_file.get(), base_address)) {
- LOG(WARNING) << "Failed fo fixup .dynamic in " << file->GetPath();
- return false;
+ LOG(WARNING) << "Failed to fixup .dynamic in " << file->GetPath();
+ return false;
}
if (!FixupSectionHeaders(*elf_file.get(), base_address)) {
- LOG(WARNING) << "Failed fo fixup section headers in " << file->GetPath();
- return false;
+ LOG(WARNING) << "Failed to fixup section headers in " << file->GetPath();
+ return false;
}
if (!FixupProgramHeaders(*elf_file.get(), base_address)) {
- LOG(WARNING) << "Failed fo fixup program headers in " << file->GetPath();
- return false;
+ LOG(WARNING) << "Failed to fixup program headers in " << file->GetPath();
+ return false;
}
if (!FixupSymbols(*elf_file.get(), base_address, true)) {
- LOG(WARNING) << "Failed fo fixup .dynsym in " << file->GetPath();
- return false;
+ LOG(WARNING) << "Failed to fixup .dynsym in " << file->GetPath();
+ return false;
}
if (!FixupSymbols(*elf_file.get(), base_address, false)) {
- LOG(WARNING) << "Failed fo fixup .symtab in " << file->GetPath();
- return false;
+ LOG(WARNING) << "Failed to fixup .symtab in " << file->GetPath();
+ return false;
}
if (!FixupRelocations(*elf_file.get(), base_address)) {
- LOG(WARNING) << "Failed fo fixup .rel.dyn in " << file->GetPath();
- return false;
+ LOG(WARNING) << "Failed to fixup .rel.dyn in " << file->GetPath();
+ return false;
+ }
+ if (!elf_file->FixupDebugSections(base_address)) {
+ LOG(WARNING) << "Failed to fixup debug sections in " << file->GetPath();
+ return false;
}
return true;
}
diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc
index bb5f7e0f9d..e45eb61030 100644
--- a/compiler/elf_writer_quick.cc
+++ b/compiler/elf_writer_quick.cc
@@ -16,11 +16,14 @@
#include "elf_writer_quick.h"
+#include <unordered_map>
+
#include "base/logging.h"
#include "base/unix_file/fd_file.h"
#include "buffered_output_stream.h"
#include "driver/compiler_driver.h"
#include "dwarf.h"
+#include "elf_file.h"
#include "elf_utils.h"
#include "file_output_stream.h"
#include "globals.h"
@@ -39,6 +42,30 @@ static uint8_t MakeStInfo(uint8_t binding, uint8_t type) {
return ((binding) << 4) + ((type) & 0xf);
}
+static void PushByte(std::vector<uint8_t>* buf, int data) {
+ buf->push_back(data & 0xff);
+}
+
+static uint32_t PushStr(std::vector<uint8_t>* buf, const char* str, const char* def = nullptr) {
+ if (str == nullptr) {
+ str = def;
+ }
+
+ uint32_t offset = buf->size();
+ for (size_t i = 0; str[i] != '\0'; ++i) {
+ buf->push_back(str[i]);
+ }
+ buf->push_back('\0');
+ return offset;
+}
+
+static uint32_t PushStr(std::vector<uint8_t>* buf, const std::string &str) {
+ uint32_t offset = buf->size();
+ buf->insert(buf->end(), str.begin(), str.end());
+ buf->push_back('\0');
+ return offset;
+}
+
static void UpdateWord(std::vector<uint8_t>* buf, int offset, int data) {
(*buf)[offset+0] = data;
(*buf)[offset+1] = data >> 8;
@@ -51,7 +78,7 @@ static void PushHalf(std::vector<uint8_t>* buf, int data) {
buf->push_back((data >> 8) & 0xff);
}
-bool ElfWriterQuick::ElfBuilder::Write() {
+bool ElfWriterQuick::ElfBuilder::Init() {
// The basic layout of the elf file. Order may be different in final output.
// +-------------------------+
// | Elf32_Ehdr |
@@ -120,16 +147,19 @@ bool ElfWriterQuick::ElfBuilder::Write() {
// | .debug_str\0 | (Optional)
// | .debug_info\0 | (Optional)
// | .eh_frame\0 | (Optional)
+ // | .debug_line\0 | (Optional)
// | .debug_abbrev\0 | (Optional)
// +-------------------------+ (Optional)
- // | .debug_str | (Optional)
- // +-------------------------+ (Optional)
// | .debug_info | (Optional)
// +-------------------------+ (Optional)
+ // | .debug_abbrev | (Optional)
+ // +-------------------------+ (Optional)
// | .eh_frame | (Optional)
// +-------------------------+ (Optional)
- // | .debug_abbrev | (Optional)
- // +-------------------------+
+ // | .debug_line | (Optional)
+ // +-------------------------+ (Optional)
+ // | .debug_str | (Optional)
+ // +-------------------------+ (Optional)
// | Elf32_Shdr NULL |
// | Elf32_Shdr .dynsym |
// | Elf32_Shdr .dynstr |
@@ -138,173 +168,117 @@ bool ElfWriterQuick::ElfBuilder::Write() {
// | Elf32_Shdr .rodata |
// | Elf32_Shdr .dynamic |
// | Elf32_Shdr .shstrtab |
- // | Elf32_Shdr .debug_str | (Optional)
// | Elf32_Shdr .debug_info | (Optional)
- // | Elf32_Shdr .eh_frame | (Optional)
// | Elf32_Shdr .debug_abbrev| (Optional)
+ // | Elf32_Shdr .eh_frame | (Optional)
+ // | Elf32_Shdr .debug_line | (Optional)
+ // | Elf32_Shdr .debug_str | (Optional)
// +-------------------------+
-
if (fatal_error_) {
return false;
}
// Step 1. Figure out all the offsets.
- // What phdr is.
- uint32_t phdr_offset = sizeof(Elf32_Ehdr);
- const uint8_t PH_PHDR = 0;
- const uint8_t PH_LOAD_R__ = 1;
- const uint8_t PH_LOAD_R_X = 2;
- const uint8_t PH_LOAD_RW_ = 3;
- const uint8_t PH_DYNAMIC = 4;
- const uint8_t PH_NUM = 5;
- uint32_t phdr_size = sizeof(Elf32_Phdr) * PH_NUM;
if (debug_logging_) {
- LOG(INFO) << "phdr_offset=" << phdr_offset << std::hex << " " << phdr_offset;
- LOG(INFO) << "phdr_size=" << phdr_size << std::hex << " " << phdr_size;
+ LOG(INFO) << "phdr_offset=" << PHDR_OFFSET << std::hex << " " << PHDR_OFFSET;
+ LOG(INFO) << "phdr_size=" << PHDR_SIZE << std::hex << " " << PHDR_SIZE;
}
- Elf32_Phdr program_headers[PH_NUM];
- memset(&program_headers, 0, sizeof(program_headers));
- program_headers[PH_PHDR].p_type = PT_PHDR;
- program_headers[PH_PHDR].p_offset = phdr_offset;
- program_headers[PH_PHDR].p_vaddr = phdr_offset;
- program_headers[PH_PHDR].p_paddr = phdr_offset;
- program_headers[PH_PHDR].p_filesz = sizeof(program_headers);
- program_headers[PH_PHDR].p_memsz = sizeof(program_headers);
- program_headers[PH_PHDR].p_flags = PF_R;
- program_headers[PH_PHDR].p_align = sizeof(Elf32_Word);
-
- program_headers[PH_LOAD_R__].p_type = PT_LOAD;
- program_headers[PH_LOAD_R__].p_offset = 0;
- program_headers[PH_LOAD_R__].p_vaddr = 0;
- program_headers[PH_LOAD_R__].p_paddr = 0;
- program_headers[PH_LOAD_R__].p_flags = PF_R;
-
- program_headers[PH_LOAD_R_X].p_type = PT_LOAD;
- program_headers[PH_LOAD_R_X].p_flags = PF_R | PF_X;
-
- program_headers[PH_LOAD_RW_].p_type = PT_LOAD;
- program_headers[PH_LOAD_RW_].p_flags = PF_R | PF_W;
-
- program_headers[PH_DYNAMIC].p_type = PT_DYNAMIC;
- program_headers[PH_DYNAMIC].p_flags = PF_R | PF_W;
+
+ memset(&program_headers_, 0, sizeof(program_headers_));
+ program_headers_[PH_PHDR].p_type = PT_PHDR;
+ program_headers_[PH_PHDR].p_offset = PHDR_OFFSET;
+ program_headers_[PH_PHDR].p_vaddr = PHDR_OFFSET;
+ program_headers_[PH_PHDR].p_paddr = PHDR_OFFSET;
+ program_headers_[PH_PHDR].p_filesz = sizeof(program_headers_);
+ program_headers_[PH_PHDR].p_memsz = sizeof(program_headers_);
+ program_headers_[PH_PHDR].p_flags = PF_R;
+ program_headers_[PH_PHDR].p_align = sizeof(Elf32_Word);
+
+ program_headers_[PH_LOAD_R__].p_type = PT_LOAD;
+ program_headers_[PH_LOAD_R__].p_offset = 0;
+ program_headers_[PH_LOAD_R__].p_vaddr = 0;
+ program_headers_[PH_LOAD_R__].p_paddr = 0;
+ program_headers_[PH_LOAD_R__].p_flags = PF_R;
+
+ program_headers_[PH_LOAD_R_X].p_type = PT_LOAD;
+ program_headers_[PH_LOAD_R_X].p_flags = PF_R | PF_X;
+
+ program_headers_[PH_LOAD_RW_].p_type = PT_LOAD;
+ program_headers_[PH_LOAD_RW_].p_flags = PF_R | PF_W;
+
+ program_headers_[PH_DYNAMIC].p_type = PT_DYNAMIC;
+ program_headers_[PH_DYNAMIC].p_flags = PF_R | PF_W;
// Get the dynstr string.
- std::string dynstr(dynsym_builder_.GenerateStrtab());
+ dynstr_ = dynsym_builder_.GenerateStrtab();
// Add the SONAME to the dynstr.
- uint32_t dynstr_soname_offset = dynstr.size();
+ dynstr_soname_offset_ = dynstr_.size();
std::string file_name(elf_file_->GetPath());
size_t directory_separator_pos = file_name.rfind('/');
if (directory_separator_pos != std::string::npos) {
file_name = file_name.substr(directory_separator_pos + 1);
}
- dynstr += file_name;
- dynstr += '\0';
+ dynstr_ += file_name;
+ dynstr_ += '\0';
if (debug_logging_) {
- LOG(INFO) << "dynstr size (bytes) =" << dynstr.size()
- << std::hex << " " << dynstr.size();
+ LOG(INFO) << "dynstr size (bytes) =" << dynstr_.size()
+ << std::hex << " " << dynstr_.size();
LOG(INFO) << "dynsym size (elements)=" << dynsym_builder_.GetSize()
<< std::hex << " " << dynsym_builder_.GetSize();
}
- // get the strtab
- std::string strtab;
- if (IncludingDebugSymbols()) {
- strtab = symtab_builder_.GenerateStrtab();
- if (debug_logging_) {
- LOG(INFO) << "strtab size (bytes) =" << strtab.size()
- << std::hex << " " << strtab.size();
- LOG(INFO) << "symtab size (elements) =" << symtab_builder_.GetSize()
- << std::hex << " " << symtab_builder_.GetSize();
- }
- }
-
// Get the section header string table.
- std::vector<Elf32_Shdr*> section_ptrs;
- std::string shstrtab;
- shstrtab += '\0';
+ shstrtab_ += '\0';
// Setup sym_undef
- Elf32_Shdr null_hdr;
- memset(&null_hdr, 0, sizeof(null_hdr));
- null_hdr.sh_type = SHT_NULL;
- null_hdr.sh_link = SHN_UNDEF;
- section_ptrs.push_back(&null_hdr);
+ memset(&null_hdr_, 0, sizeof(null_hdr_));
+ null_hdr_.sh_type = SHT_NULL;
+ null_hdr_.sh_link = SHN_UNDEF;
+ section_ptrs_.push_back(&null_hdr_);
- uint32_t section_index = 1;
+ section_index_ = 1;
// setup .dynsym
- section_ptrs.push_back(&dynsym_builder_.section_);
- AssignSectionStr(&dynsym_builder_, &shstrtab);
- dynsym_builder_.section_index_ = section_index++;
+ section_ptrs_.push_back(&dynsym_builder_.section_);
+ AssignSectionStr(&dynsym_builder_, &shstrtab_);
+ dynsym_builder_.section_index_ = section_index_++;
// Setup .dynstr
- section_ptrs.push_back(&dynsym_builder_.strtab_.section_);
- AssignSectionStr(&dynsym_builder_.strtab_, &shstrtab);
- dynsym_builder_.strtab_.section_index_ = section_index++;
+ section_ptrs_.push_back(&dynsym_builder_.strtab_.section_);
+ AssignSectionStr(&dynsym_builder_.strtab_, &shstrtab_);
+ dynsym_builder_.strtab_.section_index_ = section_index_++;
// Setup .hash
- section_ptrs.push_back(&hash_builder_.section_);
- AssignSectionStr(&hash_builder_, &shstrtab);
- hash_builder_.section_index_ = section_index++;
+ section_ptrs_.push_back(&hash_builder_.section_);
+ AssignSectionStr(&hash_builder_, &shstrtab_);
+ hash_builder_.section_index_ = section_index_++;
// Setup .rodata
- section_ptrs.push_back(&rodata_builder_.section_);
- AssignSectionStr(&rodata_builder_, &shstrtab);
- rodata_builder_.section_index_ = section_index++;
+ section_ptrs_.push_back(&rodata_builder_.section_);
+ AssignSectionStr(&rodata_builder_, &shstrtab_);
+ rodata_builder_.section_index_ = section_index_++;
// Setup .text
- section_ptrs.push_back(&text_builder_.section_);
- AssignSectionStr(&text_builder_, &shstrtab);
- text_builder_.section_index_ = section_index++;
+ section_ptrs_.push_back(&text_builder_.section_);
+ AssignSectionStr(&text_builder_, &shstrtab_);
+ text_builder_.section_index_ = section_index_++;
// Setup .dynamic
- section_ptrs.push_back(&dynamic_builder_.section_);
- AssignSectionStr(&dynamic_builder_, &shstrtab);
- dynamic_builder_.section_index_ = section_index++;
-
- if (IncludingDebugSymbols()) {
- // Setup .symtab
- section_ptrs.push_back(&symtab_builder_.section_);
- AssignSectionStr(&symtab_builder_, &shstrtab);
- symtab_builder_.section_index_ = section_index++;
-
- // Setup .strtab
- section_ptrs.push_back(&symtab_builder_.strtab_.section_);
- AssignSectionStr(&symtab_builder_.strtab_, &shstrtab);
- symtab_builder_.strtab_.section_index_ = section_index++;
- }
- ElfRawSectionBuilder* it = other_builders_.data();
- for (uint32_t cnt = 0; cnt < other_builders_.size(); ++it, ++cnt) {
- // Setup all the other sections.
- section_ptrs.push_back(&it->section_);
- AssignSectionStr(it, &shstrtab);
- it->section_index_ = section_index++;
- }
-
- // Setup shstrtab
- section_ptrs.push_back(&shstrtab_builder_.section_);
- AssignSectionStr(&shstrtab_builder_, &shstrtab);
- shstrtab_builder_.section_index_ = section_index++;
-
- if (debug_logging_) {
- LOG(INFO) << ".shstrtab size (bytes) =" << shstrtab.size()
- << std::hex << " " << shstrtab.size();
- LOG(INFO) << "section list size (elements)=" << section_ptrs.size()
- << std::hex << " " << section_ptrs.size();
- }
+ section_ptrs_.push_back(&dynamic_builder_.section_);
+ AssignSectionStr(&dynamic_builder_, &shstrtab_);
+ dynamic_builder_.section_index_ = section_index_++;
// Fill in the hash section.
- std::vector<Elf32_Word> hash = dynsym_builder_.GenerateHashContents();
+ hash_ = dynsym_builder_.GenerateHashContents();
if (debug_logging_) {
- LOG(INFO) << ".hash size (bytes)=" << hash.size() * sizeof(Elf32_Word)
- << std::hex << " " << hash.size() * sizeof(Elf32_Word);
+ LOG(INFO) << ".hash size (bytes)=" << hash_.size() * sizeof(Elf32_Word)
+ << std::hex << " " << hash_.size() * sizeof(Elf32_Word);
}
- Elf32_Word base_offset = sizeof(Elf32_Ehdr) + sizeof(program_headers);
- std::vector<ElfFilePiece> pieces;
+ Elf32_Word base_offset = sizeof(Elf32_Ehdr) + sizeof(program_headers_);
// Get the layout in the sections.
//
@@ -318,14 +292,14 @@ bool ElfWriterQuick::ElfBuilder::Write() {
dynsym_builder_.strtab_.section_.sh_offset = NextOffset(dynsym_builder_.strtab_.section_,
dynsym_builder_.section_);
dynsym_builder_.strtab_.section_.sh_addr = dynsym_builder_.strtab_.section_.sh_offset;
- dynsym_builder_.strtab_.section_.sh_size = dynstr.size();
+ dynsym_builder_.strtab_.section_.sh_size = dynstr_.size();
dynsym_builder_.strtab_.section_.sh_link = dynsym_builder_.strtab_.GetLink();
// Get the layout of the hash section
hash_builder_.section_.sh_offset = NextOffset(hash_builder_.section_,
dynsym_builder_.strtab_.section_);
hash_builder_.section_.sh_addr = hash_builder_.section_.sh_offset;
- hash_builder_.section_.sh_size = hash.size() * sizeof(Elf32_Word);
+ hash_builder_.section_.sh_size = hash_.size() * sizeof(Elf32_Word);
hash_builder_.section_.sh_link = hash_builder_.GetLink();
// Get the layout of the rodata section.
@@ -349,7 +323,70 @@ bool ElfWriterQuick::ElfBuilder::Write() {
dynamic_builder_.section_.sh_size = dynamic_builder_.GetSize() * sizeof(Elf32_Dyn);
dynamic_builder_.section_.sh_link = dynamic_builder_.GetLink();
+ if (debug_logging_) {
+ LOG(INFO) << "dynsym off=" << dynsym_builder_.section_.sh_offset
+ << " dynsym size=" << dynsym_builder_.section_.sh_size;
+ LOG(INFO) << "dynstr off=" << dynsym_builder_.strtab_.section_.sh_offset
+ << " dynstr size=" << dynsym_builder_.strtab_.section_.sh_size;
+ LOG(INFO) << "hash off=" << hash_builder_.section_.sh_offset
+ << " hash size=" << hash_builder_.section_.sh_size;
+ LOG(INFO) << "rodata off=" << rodata_builder_.section_.sh_offset
+ << " rodata size=" << rodata_builder_.section_.sh_size;
+ LOG(INFO) << "text off=" << text_builder_.section_.sh_offset
+ << " text size=" << text_builder_.section_.sh_size;
+ LOG(INFO) << "dynamic off=" << dynamic_builder_.section_.sh_offset
+ << " dynamic size=" << dynamic_builder_.section_.sh_size;
+ }
+
+ return true;
+}
+
+bool ElfWriterQuick::ElfBuilder::Write() {
+ std::vector<ElfFilePiece> pieces;
Elf32_Shdr prev = dynamic_builder_.section_;
+ std::string strtab;
+
+ if (IncludingDebugSymbols()) {
+ // Setup .symtab
+ section_ptrs_.push_back(&symtab_builder_.section_);
+ AssignSectionStr(&symtab_builder_, &shstrtab_);
+ symtab_builder_.section_index_ = section_index_++;
+
+ // Setup .strtab
+ section_ptrs_.push_back(&symtab_builder_.strtab_.section_);
+ AssignSectionStr(&symtab_builder_.strtab_, &shstrtab_);
+ symtab_builder_.strtab_.section_index_ = section_index_++;
+
+ strtab = symtab_builder_.GenerateStrtab();
+ if (debug_logging_) {
+ LOG(INFO) << "strtab size (bytes) =" << strtab.size()
+ << std::hex << " " << strtab.size();
+ LOG(INFO) << "symtab size (elements) =" << symtab_builder_.GetSize()
+ << std::hex << " " << symtab_builder_.GetSize();
+ }
+ }
+
+ // Setup all the other sections.
+ for (ElfRawSectionBuilder *builder = other_builders_.data(),
+ *end = builder + other_builders_.size();
+ builder != end; ++builder) {
+ section_ptrs_.push_back(&builder->section_);
+ AssignSectionStr(builder, &shstrtab_);
+ builder->section_index_ = section_index_++;
+ }
+
+ // Setup shstrtab
+ section_ptrs_.push_back(&shstrtab_builder_.section_);
+ AssignSectionStr(&shstrtab_builder_, &shstrtab_);
+ shstrtab_builder_.section_index_ = section_index_++;
+
+ if (debug_logging_) {
+ LOG(INFO) << ".shstrtab size (bytes) =" << shstrtab_.size()
+ << std::hex << " " << shstrtab_.size();
+ LOG(INFO) << "section list size (elements)=" << section_ptrs_.size()
+ << std::hex << " " << section_ptrs_.size();
+ }
+
if (IncludingDebugSymbols()) {
// Get the layout of the symtab section.
symtab_builder_.section_.sh_offset = NextOffset(symtab_builder_.section_,
@@ -367,27 +404,14 @@ bool ElfWriterQuick::ElfBuilder::Write() {
symtab_builder_.strtab_.section_.sh_link = symtab_builder_.strtab_.GetLink();
prev = symtab_builder_.strtab_.section_;
- }
- if (debug_logging_) {
- LOG(INFO) << "dynsym off=" << dynsym_builder_.section_.sh_offset
- << " dynsym size=" << dynsym_builder_.section_.sh_size;
- LOG(INFO) << "dynstr off=" << dynsym_builder_.strtab_.section_.sh_offset
- << " dynstr size=" << dynsym_builder_.strtab_.section_.sh_size;
- LOG(INFO) << "hash off=" << hash_builder_.section_.sh_offset
- << " hash size=" << hash_builder_.section_.sh_size;
- LOG(INFO) << "rodata off=" << rodata_builder_.section_.sh_offset
- << " rodata size=" << rodata_builder_.section_.sh_size;
- LOG(INFO) << "text off=" << text_builder_.section_.sh_offset
- << " text size=" << text_builder_.section_.sh_size;
- LOG(INFO) << "dynamic off=" << dynamic_builder_.section_.sh_offset
- << " dynamic size=" << dynamic_builder_.section_.sh_size;
- if (IncludingDebugSymbols()) {
+ if (debug_logging_) {
LOG(INFO) << "symtab off=" << symtab_builder_.section_.sh_offset
<< " symtab size=" << symtab_builder_.section_.sh_size;
LOG(INFO) << "strtab off=" << symtab_builder_.strtab_.section_.sh_offset
<< " strtab size=" << symtab_builder_.strtab_.section_.sh_size;
}
}
+
// Get the layout of the extra sections. (This will deal with the debug
// sections if they are there)
for (auto it = other_builders_.begin(); it != other_builders_.end(); ++it) {
@@ -403,10 +427,11 @@ bool ElfWriterQuick::ElfBuilder::Write() {
<< " " << it->name_ << " size=" << it->section_.sh_size;
}
}
+
// Get the layout of the shstrtab section
shstrtab_builder_.section_.sh_offset = NextOffset(shstrtab_builder_.section_, prev);
shstrtab_builder_.section_.sh_addr = 0;
- shstrtab_builder_.section_.sh_size = shstrtab.size();
+ shstrtab_builder_.section_.sh_size = shstrtab_.size();
shstrtab_builder_.section_.sh_link = shstrtab_builder_.GetLink();
if (debug_logging_) {
LOG(INFO) << "shstrtab off=" << shstrtab_builder_.section_.sh_offset
@@ -430,58 +455,58 @@ bool ElfWriterQuick::ElfBuilder::Write() {
// Setup the dynamic section.
// This will add the two values we could not know until now, namely the size
// and the soname_offset.
- std::vector<Elf32_Dyn> dynamic = dynamic_builder_.GetDynamics(dynstr.size(),
- dynstr_soname_offset);
+ std::vector<Elf32_Dyn> dynamic = dynamic_builder_.GetDynamics(dynstr_.size(),
+ dynstr_soname_offset_);
CHECK_EQ(dynamic.size() * sizeof(Elf32_Dyn), dynamic_builder_.section_.sh_size);
// Finish setup of the program headers now that we know the layout of the
// whole file.
Elf32_Word load_r_size = rodata_builder_.section_.sh_offset + rodata_builder_.section_.sh_size;
- program_headers[PH_LOAD_R__].p_filesz = load_r_size;
- program_headers[PH_LOAD_R__].p_memsz = load_r_size;
- program_headers[PH_LOAD_R__].p_align = rodata_builder_.section_.sh_addralign;
+ program_headers_[PH_LOAD_R__].p_filesz = load_r_size;
+ program_headers_[PH_LOAD_R__].p_memsz = load_r_size;
+ program_headers_[PH_LOAD_R__].p_align = rodata_builder_.section_.sh_addralign;
Elf32_Word load_rx_size = text_builder_.section_.sh_size;
- program_headers[PH_LOAD_R_X].p_offset = text_builder_.section_.sh_offset;
- program_headers[PH_LOAD_R_X].p_vaddr = text_builder_.section_.sh_offset;
- program_headers[PH_LOAD_R_X].p_paddr = text_builder_.section_.sh_offset;
- program_headers[PH_LOAD_R_X].p_filesz = load_rx_size;
- program_headers[PH_LOAD_R_X].p_memsz = load_rx_size;
- program_headers[PH_LOAD_R_X].p_align = text_builder_.section_.sh_addralign;
-
- program_headers[PH_LOAD_RW_].p_offset = dynamic_builder_.section_.sh_offset;
- program_headers[PH_LOAD_RW_].p_vaddr = dynamic_builder_.section_.sh_offset;
- program_headers[PH_LOAD_RW_].p_paddr = dynamic_builder_.section_.sh_offset;
- program_headers[PH_LOAD_RW_].p_filesz = dynamic_builder_.section_.sh_size;
- program_headers[PH_LOAD_RW_].p_memsz = dynamic_builder_.section_.sh_size;
- program_headers[PH_LOAD_RW_].p_align = dynamic_builder_.section_.sh_addralign;
-
- program_headers[PH_DYNAMIC].p_offset = dynamic_builder_.section_.sh_offset;
- program_headers[PH_DYNAMIC].p_vaddr = dynamic_builder_.section_.sh_offset;
- program_headers[PH_DYNAMIC].p_paddr = dynamic_builder_.section_.sh_offset;
- program_headers[PH_DYNAMIC].p_filesz = dynamic_builder_.section_.sh_size;
- program_headers[PH_DYNAMIC].p_memsz = dynamic_builder_.section_.sh_size;
- program_headers[PH_DYNAMIC].p_align = dynamic_builder_.section_.sh_addralign;
+ program_headers_[PH_LOAD_R_X].p_offset = text_builder_.section_.sh_offset;
+ program_headers_[PH_LOAD_R_X].p_vaddr = text_builder_.section_.sh_offset;
+ program_headers_[PH_LOAD_R_X].p_paddr = text_builder_.section_.sh_offset;
+ program_headers_[PH_LOAD_R_X].p_filesz = load_rx_size;
+ program_headers_[PH_LOAD_R_X].p_memsz = load_rx_size;
+ program_headers_[PH_LOAD_R_X].p_align = text_builder_.section_.sh_addralign;
+
+ program_headers_[PH_LOAD_RW_].p_offset = dynamic_builder_.section_.sh_offset;
+ program_headers_[PH_LOAD_RW_].p_vaddr = dynamic_builder_.section_.sh_offset;
+ program_headers_[PH_LOAD_RW_].p_paddr = dynamic_builder_.section_.sh_offset;
+ program_headers_[PH_LOAD_RW_].p_filesz = dynamic_builder_.section_.sh_size;
+ program_headers_[PH_LOAD_RW_].p_memsz = dynamic_builder_.section_.sh_size;
+ program_headers_[PH_LOAD_RW_].p_align = dynamic_builder_.section_.sh_addralign;
+
+ program_headers_[PH_DYNAMIC].p_offset = dynamic_builder_.section_.sh_offset;
+ program_headers_[PH_DYNAMIC].p_vaddr = dynamic_builder_.section_.sh_offset;
+ program_headers_[PH_DYNAMIC].p_paddr = dynamic_builder_.section_.sh_offset;
+ program_headers_[PH_DYNAMIC].p_filesz = dynamic_builder_.section_.sh_size;
+ program_headers_[PH_DYNAMIC].p_memsz = dynamic_builder_.section_.sh_size;
+ program_headers_[PH_DYNAMIC].p_align = dynamic_builder_.section_.sh_addralign;
// Finish setup of the Ehdr values.
- elf_header_.e_phoff = phdr_offset;
+ elf_header_.e_phoff = PHDR_OFFSET;
elf_header_.e_shoff = sections_offset;
elf_header_.e_phnum = PH_NUM;
- elf_header_.e_shnum = section_ptrs.size();
+ elf_header_.e_shnum = section_ptrs_.size();
elf_header_.e_shstrndx = shstrtab_builder_.section_index_;
// Add the rest of the pieces to the list.
pieces.push_back(ElfFilePiece("Elf Header", 0, &elf_header_, sizeof(elf_header_)));
- pieces.push_back(ElfFilePiece("Program headers", phdr_offset,
- &program_headers, sizeof(program_headers)));
+ pieces.push_back(ElfFilePiece("Program headers", PHDR_OFFSET,
+ &program_headers_, sizeof(program_headers_)));
pieces.push_back(ElfFilePiece(".dynamic", dynamic_builder_.section_.sh_offset,
dynamic.data(), dynamic_builder_.section_.sh_size));
pieces.push_back(ElfFilePiece(".dynsym", dynsym_builder_.section_.sh_offset,
dynsym.data(), dynsym.size() * sizeof(Elf32_Sym)));
pieces.push_back(ElfFilePiece(".dynstr", dynsym_builder_.strtab_.section_.sh_offset,
- dynstr.c_str(), dynstr.size()));
+ dynstr_.c_str(), dynstr_.size()));
pieces.push_back(ElfFilePiece(".hash", hash_builder_.section_.sh_offset,
- hash.data(), hash.size() * sizeof(Elf32_Word)));
+ hash_.data(), hash_.size() * sizeof(Elf32_Word)));
pieces.push_back(ElfFilePiece(".rodata", rodata_builder_.section_.sh_offset,
nullptr, rodata_builder_.section_.sh_size));
pieces.push_back(ElfFilePiece(".text", text_builder_.section_.sh_offset,
@@ -493,13 +518,13 @@ bool ElfWriterQuick::ElfBuilder::Write() {
strtab.c_str(), strtab.size()));
}
pieces.push_back(ElfFilePiece(".shstrtab", shstrtab_builder_.section_.sh_offset,
- &shstrtab[0], shstrtab.size()));
- for (uint32_t i = 0; i < section_ptrs.size(); ++i) {
+ &shstrtab_[0], shstrtab_.size()));
+ for (uint32_t i = 0; i < section_ptrs_.size(); ++i) {
// Just add all the sections individually since they are all over the
// place on the heap/stack.
Elf32_Word cur_off = sections_offset + i * sizeof(Elf32_Shdr);
pieces.push_back(ElfFilePiece("section table piece", cur_off,
- section_ptrs[i], sizeof(Elf32_Shdr)));
+ section_ptrs_[i], sizeof(Elf32_Shdr)));
}
if (!WriteOutFile(pieces)) {
@@ -664,7 +689,7 @@ std::vector<Elf32_Word> ElfWriterQuick::ElfSymtabBuilder::GenerateHashContents()
// Let's say the state is something like this.
// +--------+ +--------+ +-----------+
// | symtab | | bucket | | chain |
- // | nullptr | | 1 | | STN_UNDEF |
+ // | null | | 1 | | STN_UNDEF |
// | <sym1> | | 4 | | 2 |
// | <sym2> | | | | 5 |
// | <sym3> | | | | STN_UNDEF |
@@ -836,13 +861,24 @@ void ElfWriterQuick::ReservePatchSpace(std::vector<uint8_t>* buffer, bool debug)
}
std::vector<uint8_t>* ConstructCIEFrameX86(bool is_x86_64) {
- std::vector<uint8_t>*cfi_info = new std::vector<uint8_t>;
+ std::vector<uint8_t>* cfi_info = new std::vector<uint8_t>;
// Length (will be filled in later in this routine).
- PushWord(cfi_info, 0);
+ if (is_x86_64) {
+ PushWord(cfi_info, 0xffffffff); // Indicates 64-bit DWARF format.
+ PushWord(cfi_info, 0);
+ PushWord(cfi_info, 0);
+ } else {
+ PushWord(cfi_info, 0);
+ }
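+ // In 64-bit DWARF the initial length is the escape value 0xffffffff followed
+ // by an 8-byte length; the two zero words pushed above reserve that slot.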
// CIE id: always 0.
- PushWord(cfi_info, 0);
+ if (is_x86_64) {
+ PushWord(cfi_info, 0);
+ PushWord(cfi_info, 0);
+ } else {
+ PushWord(cfi_info, 0);
+ }
// Version: always 1.
cfi_info->push_back(0x01);
@@ -874,8 +910,14 @@ std::vector<uint8_t>* ConstructCIEFrameX86(bool is_x86_64) {
// Augmentation length: 1.
cfi_info->push_back(1);
- // Augmentation data: 0x03 ((DW_EH_PE_absptr << 4) | DW_EH_PE_udata4).
- cfi_info->push_back(0x03);
+ // Augmentation data.
+ if (is_x86_64) {
+ // 0x04 ((DW_EH_PE_absptr << 4) | DW_EH_PE_udata8).
+ cfi_info->push_back(0x04);
+ } else {
+ // 0x03 ((DW_EH_PE_absptr << 4) | DW_EH_PE_udata4).
+ cfi_info->push_back(0x03);
+ }
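+ // The augmentation byte selects the FDE address encoding: DW_EH_PE_absptr
+ // with 8-byte (udata8) pointers on x86-64, 4-byte (udata4) on x86.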
// Initial instructions.
if (is_x86_64) {
@@ -905,11 +947,13 @@ std::vector<uint8_t>* ConstructCIEFrameX86(bool is_x86_64) {
}
// Set the length of the CIE inside the generated bytes.
- uint32_t length = cfi_info->size() - 4;
- (*cfi_info)[0] = length;
- (*cfi_info)[1] = length >> 8;
- (*cfi_info)[2] = length >> 16;
- (*cfi_info)[3] = length >> 24;
+ if (is_x86_64) {
+ uint32_t length = cfi_info->size() - 12;
+ UpdateWord(cfi_info, 4, length);
+ } else {
+ uint32_t length = cfi_info->size() - 4;
+ UpdateWord(cfi_info, 0, length);
+ }
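+ // The CIE length field excludes the initial-length bytes themselves, hence
+ // size() - 12 in the 64-bit form and size() - 4 in the 32-bit one.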
return cfi_info;
}
@@ -940,8 +984,12 @@ bool ElfWriterQuick::Write(OatWriter* oat_writer,
compiler_driver_->GetCompilerOptions().GetIncludeDebugSymbols(),
debug);
+ if (!builder.Init()) {
+ return false;
+ }
+
if (compiler_driver_->GetCompilerOptions().GetIncludeDebugSymbols()) {
- WriteDebugSymbols(builder, oat_writer);
+ WriteDebugSymbols(&builder, oat_writer);
}
if (compiler_driver_->GetCompilerOptions().GetIncludePatchInformation()) {
@@ -954,15 +1002,17 @@ bool ElfWriterQuick::Write(OatWriter* oat_writer,
return builder.Write();
}
-void ElfWriterQuick::WriteDebugSymbols(ElfBuilder& builder, OatWriter* oat_writer) {
+void ElfWriterQuick::WriteDebugSymbols(ElfBuilder* builder, OatWriter* oat_writer) {
std::unique_ptr<std::vector<uint8_t>> cfi_info(
ConstructCIEFrame(compiler_driver_->GetInstructionSet()));
+ Elf32_Addr text_section_address = builder->text_builder_.section_.sh_addr;
+
// Iterate over the compiled methods.
const std::vector<OatWriter::DebugInfo>& method_info = oat_writer->GetCFIMethodInfo();
- ElfSymtabBuilder* symtab = &builder.symtab_builder_;
+ ElfSymtabBuilder* symtab = &builder->symtab_builder_;
for (auto it = method_info.begin(); it != method_info.end(); ++it) {
- symtab->AddSymbol(it->method_name_, &builder.text_builder_, it->low_pc_, true,
+ symtab->AddSymbol(it->method_name_, &builder->text_builder_, it->low_pc_, true,
it->high_pc_ - it->low_pc_, STB_GLOBAL, STT_FUNC);
// Include CFI for compiled method, if possible.
@@ -976,96 +1026,314 @@ void ElfWriterQuick::WriteDebugSymbols(ElfBuilder& builder, OatWriter* oat_write
int cur_offset = cfi_info->size();
cfi_info->insert(cfi_info->end(), fde->begin(), fde->end());
- // Set the 'CIE_pointer' field to cur_offset+4.
- uint32_t CIE_pointer = cur_offset + 4;
- uint32_t offset_to_update = cur_offset + sizeof(uint32_t);
- (*cfi_info)[offset_to_update+0] = CIE_pointer;
- (*cfi_info)[offset_to_update+1] = CIE_pointer >> 8;
- (*cfi_info)[offset_to_update+2] = CIE_pointer >> 16;
- (*cfi_info)[offset_to_update+3] = CIE_pointer >> 24;
-
- // Set the 'initial_location' field to address the start of the method.
- offset_to_update = cur_offset + 2*sizeof(uint32_t);
- const uint32_t quick_code_start = it->low_pc_;
- (*cfi_info)[offset_to_update+0] = quick_code_start;
- (*cfi_info)[offset_to_update+1] = quick_code_start >> 8;
- (*cfi_info)[offset_to_update+2] = quick_code_start >> 16;
- (*cfi_info)[offset_to_update+3] = quick_code_start >> 24;
+ bool is_64bit = *(reinterpret_cast<const uint32_t*>(fde->data())) == 0xffffffff;
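+ // An initial length of 0xffffffff marks this FDE as 64-bit DWARF, which
+ // widens the length and pointer fields that follow.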
+
+ // Set the 'CIE_pointer' field.
+ uint64_t CIE_pointer = cur_offset + (is_64bit ? 12 : 4);
+ uint64_t offset_to_update = CIE_pointer;
+ if (is_64bit) {
+ (*cfi_info)[offset_to_update+0] = CIE_pointer;
+ (*cfi_info)[offset_to_update+1] = CIE_pointer >> 8;
+ (*cfi_info)[offset_to_update+2] = CIE_pointer >> 16;
+ (*cfi_info)[offset_to_update+3] = CIE_pointer >> 24;
+ (*cfi_info)[offset_to_update+4] = CIE_pointer >> 32;
+ (*cfi_info)[offset_to_update+5] = CIE_pointer >> 40;
+ (*cfi_info)[offset_to_update+6] = CIE_pointer >> 48;
+ (*cfi_info)[offset_to_update+7] = CIE_pointer >> 56;
+ } else {
+ (*cfi_info)[offset_to_update+0] = CIE_pointer;
+ (*cfi_info)[offset_to_update+1] = CIE_pointer >> 8;
+ (*cfi_info)[offset_to_update+2] = CIE_pointer >> 16;
+ (*cfi_info)[offset_to_update+3] = CIE_pointer >> 24;
+ }
+
+ // Set the 'initial_location' field.
+ offset_to_update += is_64bit ? 8 : 4;
+ if (is_64bit) {
+ const uint64_t quick_code_start = it->low_pc_ + text_section_address;
+ (*cfi_info)[offset_to_update+0] = quick_code_start;
+ (*cfi_info)[offset_to_update+1] = quick_code_start >> 8;
+ (*cfi_info)[offset_to_update+2] = quick_code_start >> 16;
+ (*cfi_info)[offset_to_update+3] = quick_code_start >> 24;
+ (*cfi_info)[offset_to_update+4] = quick_code_start >> 32;
+ (*cfi_info)[offset_to_update+5] = quick_code_start >> 40;
+ (*cfi_info)[offset_to_update+6] = quick_code_start >> 48;
+ (*cfi_info)[offset_to_update+7] = quick_code_start >> 56;
+ } else {
+ const uint32_t quick_code_start = it->low_pc_ + text_section_address;
+ (*cfi_info)[offset_to_update+0] = quick_code_start;
+ (*cfi_info)[offset_to_update+1] = quick_code_start >> 8;
+ (*cfi_info)[offset_to_update+2] = quick_code_start >> 16;
+ (*cfi_info)[offset_to_update+3] = quick_code_start >> 24;
+ }
}
}
}
- if (cfi_info.get() != nullptr) {
- // Now lay down the Elf sections.
- ElfRawSectionBuilder debug_info(".debug_info", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
+ bool hasCFI = (cfi_info.get() != nullptr);
+ bool hasLineInfo = false;
+ for (auto& dbg_info : oat_writer->GetCFIMethodInfo()) {
+ if (dbg_info.dbgstream_ != nullptr &&
+ !dbg_info.compiled_method_->GetSrcMappingTable().empty()) {
+ hasLineInfo = true;
+ break;
+ }
+ }
+
+ if (hasLineInfo || hasCFI) {
+ ElfRawSectionBuilder debug_info(".debug_info", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
ElfRawSectionBuilder debug_abbrev(".debug_abbrev", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
- ElfRawSectionBuilder debug_str(".debug_str", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
- ElfRawSectionBuilder eh_frame(".eh_frame", SHT_PROGBITS, SHF_ALLOC, nullptr, 0, 4, 0);
- eh_frame.SetBuffer(std::move(*cfi_info.get()));
-
- FillInCFIInformation(oat_writer, debug_info.GetBuffer(), debug_abbrev.GetBuffer(),
- debug_str.GetBuffer());
- builder.RegisterRawSection(debug_info);
- builder.RegisterRawSection(debug_abbrev);
- builder.RegisterRawSection(eh_frame);
- builder.RegisterRawSection(debug_str);
+ ElfRawSectionBuilder debug_str(".debug_str", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
+ ElfRawSectionBuilder debug_line(".debug_line", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
+
+ FillInCFIInformation(oat_writer, debug_info.GetBuffer(),
+ debug_abbrev.GetBuffer(), debug_str.GetBuffer(),
+ hasLineInfo ? debug_line.GetBuffer() : nullptr,
+ text_section_address);
+
+ builder->RegisterRawSection(debug_info);
+ builder->RegisterRawSection(debug_abbrev);
+
+ if (hasCFI) {
+ ElfRawSectionBuilder eh_frame(".eh_frame", SHT_PROGBITS, SHF_ALLOC, nullptr, 0, 4, 0);
+ eh_frame.SetBuffer(std::move(*cfi_info.get()));
+ builder->RegisterRawSection(eh_frame);
+ }
+
+ if (hasLineInfo) {
+ builder->RegisterRawSection(debug_line);
+ }
+
+ builder->RegisterRawSection(debug_str);
+ }
+}
+
+class LineTableGenerator FINAL : public Leb128Encoder {
+ public:
+ LineTableGenerator(int line_base, int line_range, int opcode_base,
+ std::vector<uint8_t>* data, uintptr_t current_address,
+ size_t current_line)
+ : Leb128Encoder(data), line_base_(line_base), line_range_(line_range),
+ opcode_base_(opcode_base), current_address_(current_address),
+ current_line_(current_line) {}
+
+ void PutDelta(unsigned delta_addr, int delta_line) {
+ current_line_ += delta_line;
+ current_address_ += delta_addr;
+
+ if (delta_line >= line_base_ && delta_line < line_base_ + line_range_) {
+ unsigned special_opcode = (delta_line - line_base_) +
+ (line_range_ * delta_addr) + opcode_base_;
+ if (special_opcode <= 255) {
+ PushByte(data_, special_opcode);
+ return;
+ }
+ }
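+ // E.g. (illustrative) with line_base = -5, line_range = 14, opcode_base = 13:
+ // delta_line = 2 and delta_addr = 3 give (2 - (-5)) + 14 * 3 + 13 = 62, a
+ // single special opcode encoding both advances.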
+
+ // Generate a standard opcode for the address advance.
+ if (delta_addr != 0) {
+ PushByte(data_, DW_LNS_advance_pc);
+ PushBackUnsigned(delta_addr);
+ }
+
+ // Generate a standard opcode for the line delta.
+ if (delta_line != 0) {
+ PushByte(data_, DW_LNS_advance_line);
+ PushBackSigned(delta_line);
+ }
+
+ // Generate a standard opcode for a new line table entry.
+ PushByte(data_, DW_LNS_copy);
+ }
+
+ void SetAddr(uintptr_t addr) {
+ if (current_address_ == addr) {
+ return;
+ }
+
+ current_address_ = addr;
+
+ PushByte(data_, 0); // extended opcode:
+ PushByte(data_, 1 + 4); // length: opcode_size + address_size
+ PushByte(data_, DW_LNE_set_address);
+ PushWord(data_, addr);
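+ // The bytes just emitted are: 0x00 (extended opcode introducer), 0x05
+ // (length), DW_LNE_set_address, then the 4-byte address.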
+ }
+
+ void SetLine(unsigned line) {
+ int delta_line = line - current_line_;
+ if (delta_line) {
+ current_line_ = line;
+ PushByte(data_, DW_LNS_advance_line);
+ PushBackSigned(delta_line);
+ }
+ }
+
+ void SetFile(unsigned file_index) {
+ PushByte(data_, DW_LNS_set_file);
+ PushBackUnsigned(file_index);
+ }
+
+ void EndSequence() {
+ // End of Line Table Program
+ // 0(=ext), 1(len), DW_LNE_end_sequence
+ PushByte(data_, 0);
+ PushByte(data_, 1);
+ PushByte(data_, DW_LNE_end_sequence);
+ }
+
+ private:
+ const int line_base_;
+ const int line_range_;
+ const int opcode_base_;
+ uintptr_t current_address_;
+ size_t current_line_;
+
+ DISALLOW_COPY_AND_ASSIGN(LineTableGenerator);
+};
+
+// TODO: rewriting this to use DexFile::DecodeDebugInfo would pull in machinery we do not need.
+static void GetLineInfoForJava(const uint8_t* dbgstream, const SrcMap& pc2dex,
+ SrcMap* result, uint32_t start_pc = 0) {
+ if (dbgstream == nullptr) {
+ return;
+ }
+
+ int adjopcode;
+ uint32_t dex_offset = 0;
+ uint32_t java_line = DecodeUnsignedLeb128(&dbgstream);
+
+ // skip parameters
+ for (uint32_t param_count = DecodeUnsignedLeb128(&dbgstream); param_count != 0; --param_count) {
+ DecodeUnsignedLeb128(&dbgstream);
+ }
+
+ for (bool is_end = false; is_end == false; ) {
+ uint8_t opcode = *dbgstream;
+ dbgstream++;
+ switch (opcode) {
+ case DexFile::DBG_END_SEQUENCE:
+ is_end = true;
+ break;
+
+ case DexFile::DBG_ADVANCE_PC:
+ dex_offset += DecodeUnsignedLeb128(&dbgstream);
+ break;
+
+ case DexFile::DBG_ADVANCE_LINE:
+ java_line += DecodeSignedLeb128(&dbgstream);
+ break;
+
+ case DexFile::DBG_START_LOCAL:
+ case DexFile::DBG_START_LOCAL_EXTENDED:
+ DecodeUnsignedLeb128(&dbgstream);
+ DecodeUnsignedLeb128(&dbgstream);
+ DecodeUnsignedLeb128(&dbgstream);
+
+ if (opcode == DexFile::DBG_START_LOCAL_EXTENDED) {
+ DecodeUnsignedLeb128(&dbgstream);
+ }
+ break;
+
+ case DexFile::DBG_END_LOCAL:
+ case DexFile::DBG_RESTART_LOCAL:
+ DecodeUnsignedLeb128(&dbgstream);
+ break;
+
+ case DexFile::DBG_SET_PROLOGUE_END:
+ case DexFile::DBG_SET_EPILOGUE_BEGIN:
+ case DexFile::DBG_SET_FILE:
+ break;
+
+ default:
+ adjopcode = opcode - DexFile::DBG_FIRST_SPECIAL;
+ dex_offset += adjopcode / DexFile::DBG_LINE_RANGE;
+ java_line += DexFile::DBG_LINE_BASE + (adjopcode % DexFile::DBG_LINE_RANGE);
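+ // E.g. (illustrative) with DBG_FIRST_SPECIAL = 0x0a, DBG_LINE_BASE = -4 and
+ // DBG_LINE_RANGE = 15: opcode 0x20 gives adjopcode = 22, so dex_offset
+ // advances by 22 / 15 = 1 and java_line by -4 + (22 % 15) = 3.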
+
+ for (SrcMap::const_iterator found = pc2dex.FindByTo(dex_offset);
+ found != pc2dex.end() && found->to_ == static_cast<int32_t>(dex_offset);
+ found++) {
+ result->push_back({found->from_ + start_pc, static_cast<int32_t>(java_line)});
+ }
+ break;
+ }
}
}
void ElfWriterQuick::FillInCFIInformation(OatWriter* oat_writer,
std::vector<uint8_t>* dbg_info,
std::vector<uint8_t>* dbg_abbrev,
- std::vector<uint8_t>* dbg_str) {
+ std::vector<uint8_t>* dbg_str,
+ std::vector<uint8_t>* dbg_line,
+ uint32_t text_section_offset) {
+ const std::vector<OatWriter::DebugInfo>& method_info = oat_writer->GetCFIMethodInfo();
+
+ uint32_t producer_str_offset = PushStr(dbg_str, "Android dex2oat");
+
// Create the debug_abbrev section with boilerplate information.
// We only care about low_pc and high_pc right now for the compilation
// unit and methods.
// Tag 1: Compilation unit: DW_TAG_compile_unit.
- dbg_abbrev->push_back(1);
- dbg_abbrev->push_back(DW_TAG_compile_unit);
+ PushByte(dbg_abbrev, 1);
+ PushByte(dbg_abbrev, DW_TAG_compile_unit);
// There are children (the methods).
- dbg_abbrev->push_back(DW_CHILDREN_yes);
+ PushByte(dbg_abbrev, DW_CHILDREN_yes);
+
+ // DW_AT_producer DW_FORM_data1.
+ // REVIEW: we could get rid of the dbg_str section if
+ // DW_FORM_string (an immediate string) were used everywhere instead of
+ // DW_FORM_strp (a reference to a string in the .debug_str section).
+ // DW_FORM_strp makes sense only if we reuse the strings.
+ PushByte(dbg_abbrev, DW_AT_producer);
+ PushByte(dbg_abbrev, DW_FORM_strp);
// DW_LANG_Java DW_FORM_data1.
- dbg_abbrev->push_back(DW_AT_language);
- dbg_abbrev->push_back(DW_FORM_data1);
+ PushByte(dbg_abbrev, DW_AT_language);
+ PushByte(dbg_abbrev, DW_FORM_data1);
// DW_AT_low_pc DW_FORM_addr.
- dbg_abbrev->push_back(DW_AT_low_pc);
- dbg_abbrev->push_back(DW_FORM_addr);
+ PushByte(dbg_abbrev, DW_AT_low_pc);
+ PushByte(dbg_abbrev, DW_FORM_addr);
// DW_AT_high_pc DW_FORM_addr.
- dbg_abbrev->push_back(DW_AT_high_pc);
- dbg_abbrev->push_back(DW_FORM_addr);
+ PushByte(dbg_abbrev, DW_AT_high_pc);
+ PushByte(dbg_abbrev, DW_FORM_addr);
+
+ if (dbg_line != nullptr) {
+ // DW_AT_stmt_list DW_FORM_sec_offset.
+ PushByte(dbg_abbrev, DW_AT_stmt_list);
+ PushByte(dbg_abbrev, DW_FORM_sec_offset);
+ }
// End of DW_TAG_compile_unit.
PushHalf(dbg_abbrev, 0);
// Tag 2: Compilation unit: DW_TAG_subprogram.
- dbg_abbrev->push_back(2);
- dbg_abbrev->push_back(DW_TAG_subprogram);
+ PushByte(dbg_abbrev, 2);
+ PushByte(dbg_abbrev, DW_TAG_subprogram);
// There are no children.
- dbg_abbrev->push_back(DW_CHILDREN_no);
+ PushByte(dbg_abbrev, DW_CHILDREN_no);
// Name of the method.
- dbg_abbrev->push_back(DW_AT_name);
- dbg_abbrev->push_back(DW_FORM_strp);
+ PushByte(dbg_abbrev, DW_AT_name);
+ PushByte(dbg_abbrev, DW_FORM_strp);
// DW_AT_low_pc DW_FORM_addr.
- dbg_abbrev->push_back(DW_AT_low_pc);
- dbg_abbrev->push_back(DW_FORM_addr);
+ PushByte(dbg_abbrev, DW_AT_low_pc);
+ PushByte(dbg_abbrev, DW_FORM_addr);
// DW_AT_high_pc DW_FORM_addr.
- dbg_abbrev->push_back(DW_AT_high_pc);
- dbg_abbrev->push_back(DW_FORM_addr);
+ PushByte(dbg_abbrev, DW_AT_high_pc);
+ PushByte(dbg_abbrev, DW_FORM_addr);
// End of DW_TAG_subprogram.
PushHalf(dbg_abbrev, 0);
// Start the debug_info section with the header information
// 'unit_length' will be filled in later.
+ int cunit_length = dbg_info->size();
PushWord(dbg_info, 0);
// 'version' - 3.
@@ -1075,55 +1343,153 @@ void ElfWriterQuick::FillInCFIInformation(OatWriter* oat_writer,
PushWord(dbg_info, 0);
// Address size: 4.
- dbg_info->push_back(4);
+ PushByte(dbg_info, 4);
// Start the description for the compilation unit.
// This uses tag 1.
- dbg_info->push_back(1);
+ PushByte(dbg_info, 1);
+
+ // The producer is Android dex2oat.
+ PushWord(dbg_info, producer_str_offset);
// The language is Java.
- dbg_info->push_back(DW_LANG_Java);
+ PushByte(dbg_info, DW_LANG_Java);
- // Leave space for low_pc and high_pc.
- int low_pc_offset = dbg_info->size();
+ // low_pc and high_pc.
+ uint32_t cunit_low_pc = 0 - 1;
+ uint32_t cunit_high_pc = 0;
+ int cunit_low_pc_pos = dbg_info->size();
PushWord(dbg_info, 0);
PushWord(dbg_info, 0);
- // Walk through the information in the method table, and enter into dbg_info.
- const std::vector<OatWriter::DebugInfo>& dbg = oat_writer->GetCFIMethodInfo();
- uint32_t low_pc = 0xFFFFFFFFU;
- uint32_t high_pc = 0;
+ if (dbg_line == nullptr) {
+ for (size_t i = 0; i < method_info.size(); ++i) {
+ const OatWriter::DebugInfo &dbg = method_info[i];
+
+ cunit_low_pc = std::min(cunit_low_pc, dbg.low_pc_);
+ cunit_high_pc = std::max(cunit_high_pc, dbg.high_pc_);
+
+ // Start a new TAG: subroutine (2).
+ PushByte(dbg_info, 2);
- for (uint32_t i = 0; i < dbg.size(); i++) {
- const OatWriter::DebugInfo& info = dbg[i];
- if (info.low_pc_ < low_pc) {
- low_pc = info.low_pc_;
+ // Enter name, low_pc, high_pc.
+ PushWord(dbg_info, PushStr(dbg_str, dbg.method_name_));
+ PushWord(dbg_info, dbg.low_pc_ + text_section_offset);
+ PushWord(dbg_info, dbg.high_pc_ + text_section_offset);
}
- if (info.high_pc_ > high_pc) {
- high_pc = info.high_pc_;
+ } else {
+ // TODO: in gdb, 'info functions <regexp>' reports Java functions, but the
+ // source file is <unknown> because .debug_line is formed as a single
+ // compilation unit. To fix this it is possible to generate
+ // a separate compilation unit for every distinct Java source file.
+ // Each of these compilation units can have several non-adjacent
+ // method ranges.
+
+ // Line number table offset
+ PushWord(dbg_info, dbg_line->size());
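+ // DW_AT_stmt_list points at this compilation unit's line number program
+ // within .debug_line, i.e. the current end of the dbg_line buffer, since
+ // the program is appended immediately below.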
+
+ size_t lnt_length = dbg_line->size();
+ PushWord(dbg_line, 0);
+
+ PushHalf(dbg_line, 4); // LNT Version DWARF v4 => 4
+
+ size_t lnt_hdr_length = dbg_line->size();
+ PushWord(dbg_line, 0); // TODO: 64-bit DWARF uses an 8-byte field here.
+
+ PushByte(dbg_line, 1); // minimum_instruction_length (ubyte)
+ PushByte(dbg_line, 1); // maximum_operations_per_instruction (ubyte) = always 1
+ PushByte(dbg_line, 1); // default_is_stmt (ubyte)
+
+ const int8_t LINE_BASE = -5;
+ PushByte(dbg_line, LINE_BASE); // line_base (sbyte)
+
+ const uint8_t LINE_RANGE = 14;
+ PushByte(dbg_line, LINE_RANGE); // line_range (ubyte)
+
+ const uint8_t OPCODE_BASE = 13;
+ PushByte(dbg_line, OPCODE_BASE); // opcode_base (ubyte)
+
+ // Standard_opcode_lengths (array of ubyte).
+ PushByte(dbg_line, 0); PushByte(dbg_line, 1); PushByte(dbg_line, 1);
+ PushByte(dbg_line, 1); PushByte(dbg_line, 1); PushByte(dbg_line, 0);
+ PushByte(dbg_line, 0); PushByte(dbg_line, 0); PushByte(dbg_line, 1);
+ PushByte(dbg_line, 0); PushByte(dbg_line, 0); PushByte(dbg_line, 1);
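+ // One entry per standard opcode 1..12 (opcode_base - 1), giving the number
+ // of LEB128 operands each standard opcode takes.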
+
+ PushByte(dbg_line, 0); // include_directories (sequence of path names) = EMPTY
+
+ // File_names (sequence of file entries).
+ std::unordered_map<const char*, size_t> files;
+ for (size_t i = 0; i < method_info.size(); ++i) {
+ const OatWriter::DebugInfo &dbg = method_info[i];
+ // TODO: add package directory to the file name
+ const char* file_name = dbg.src_file_name_ == nullptr ? "null" : dbg.src_file_name_;
+ auto found = files.find(file_name);
+ if (found == files.end()) {
+ size_t file_index = 1 + files.size();
+ files[file_name] = file_index;
+ PushStr(dbg_line, file_name);
+ PushByte(dbg_line, 0); // include directory index = LEB128(0) - no directory
+ PushByte(dbg_line, 0); // modification time = LEB128(0) - NA
+ PushByte(dbg_line, 0); // file length = LEB128(0) - NA
+ }
+ }
+ PushByte(dbg_line, 0); // End of file_names.
+
+ // Set lnt header length.
+ UpdateWord(dbg_line, lnt_hdr_length, dbg_line->size() - lnt_hdr_length - 4);
+
+ // Generate Line Number Program code, one long program for all methods.
+ LineTableGenerator line_table_generator(LINE_BASE, LINE_RANGE, OPCODE_BASE,
+ dbg_line, 0, 1);
+
+ SrcMap pc2java_map;
+ for (size_t i = 0; i < method_info.size(); ++i) {
+ const OatWriter::DebugInfo &dbg = method_info[i];
+ const char* file_name = (dbg.src_file_name_ == nullptr) ? "null" : dbg.src_file_name_;
+ size_t file_index = files[file_name];
+ DCHECK_NE(file_index, 0U) << file_name;
+
+ cunit_low_pc = std::min(cunit_low_pc, dbg.low_pc_);
+ cunit_high_pc = std::max(cunit_high_pc, dbg.high_pc_);
+
+ // Start a new TAG: subroutine (2).
+ PushByte(dbg_info, 2);
+
+ // Enter name, low_pc, high_pc.
+ PushWord(dbg_info, PushStr(dbg_str, dbg.method_name_));
+ PushWord(dbg_info, dbg.low_pc_ + text_section_offset);
+ PushWord(dbg_info, dbg.high_pc_ + text_section_offset);
+
+ pc2java_map.clear();
+ GetLineInfoForJava(dbg.dbgstream_, dbg.compiled_method_->GetSrcMappingTable(),
+ &pc2java_map, dbg.low_pc_);
+ pc2java_map.DeltaFormat({dbg.low_pc_, 1}, dbg.high_pc_);
+
+ line_table_generator.SetFile(file_index);
+ line_table_generator.SetAddr(dbg.low_pc_ + text_section_offset);
+ line_table_generator.SetLine(1);
+ for (auto& src_map_elem : pc2java_map) {
+ line_table_generator.PutDelta(src_map_elem.from_, src_map_elem.to_);
+ }
}
- // Start a new TAG: subroutine (2).
- dbg_info->push_back(2);
-
- // Enter the name into the string table (and NUL terminate).
- uint32_t str_offset = dbg_str->size();
- dbg_str->insert(dbg_str->end(), info.method_name_.begin(), info.method_name_.end());
- dbg_str->push_back('\0');
+ // End Sequence should have the highest address set.
+ line_table_generator.SetAddr(cunit_high_pc + text_section_offset);
+ line_table_generator.EndSequence();
- // Enter name, low_pc, high_pc.
- PushWord(dbg_info, str_offset);
- PushWord(dbg_info, info.low_pc_);
- PushWord(dbg_info, info.high_pc_);
+ // Set the lnt length.
+ UpdateWord(dbg_line, lnt_length, dbg_line->size() - lnt_length - 4);
}
// One byte terminator
- dbg_info->push_back(0);
+ PushByte(dbg_info, 0);
+
+ // Fill in cunit's low_pc and high_pc.
+ UpdateWord(dbg_info, cunit_low_pc_pos, cunit_low_pc + text_section_offset);
+ UpdateWord(dbg_info, cunit_low_pc_pos + 4, cunit_high_pc + text_section_offset);
- // We have now walked all the methods. Fill in lengths and low/high PCs.
- UpdateWord(dbg_info, 0, dbg_info->size() - 4);
- UpdateWord(dbg_info, low_pc_offset, low_pc);
- UpdateWord(dbg_info, low_pc_offset + 4, high_pc);
+ // We have now walked all the methods. Fill in lengths.
+ UpdateWord(dbg_info, cunit_length, dbg_info->size() - cunit_length - 4);
}
} // namespace art
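Both units above use the same back-patching idiom: reserve a length field with PushWord(buf, 0), emit the body, then rewrite the field via UpdateWord once the final size is known. A minimal sketch of that idiom (the PushWord/UpdateWord definitions are assumed from their call sites in this patch; EmitUnitBody is a hypothetical stand-in):

    #include <cstdint>
    #include <vector>

    // Little-endian emission, matching how the units above are laid out.
    static void PushWord(std::vector<uint8_t>* buf, uint32_t data) {
      for (int shift = 0; shift < 32; shift += 8) {
        buf->push_back((data >> shift) & 0xff);
      }
    }

    static void UpdateWord(std::vector<uint8_t>* buf, size_t offset, uint32_t data) {
      for (int shift = 0; shift < 32; shift += 8) {
        (*buf)[offset + shift / 8] = (data >> shift) & 0xff;
      }
    }

    // Reserve, emit, back-patch. DWARF length fields exclude the field
    // itself, hence the "- 4".
    size_t length_pos = buf.size();
    PushWord(&buf, 0);                                         // placeholder
    EmitUnitBody(&buf);                                        // hypothetical
    UpdateWord(&buf, length_pos, buf.size() - length_pos - 4);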
diff --git a/compiler/elf_writer_quick.h b/compiler/elf_writer_quick.h
index 8cfe550495..c7ef872174 100644
--- a/compiler/elf_writer_quick.h
+++ b/compiler/elf_writer_quick.h
@@ -48,7 +48,7 @@ class ElfWriterQuick FINAL : public ElfWriter {
~ElfWriterQuick() {}
class ElfBuilder;
- void WriteDebugSymbols(ElfBuilder& builder, OatWriter* oat_writer);
+ void WriteDebugSymbols(ElfBuilder* builder, OatWriter* oat_writer);
void ReservePatchSpace(std::vector<uint8_t>* buffer, bool debug);
class ElfSectionBuilder {
@@ -237,6 +237,7 @@ class ElfWriterQuick FINAL : public ElfWriter {
}
~ElfBuilder() {}
+ bool Init();
bool Write();
// Adds the given raw section to the builder. This will copy it. The caller
@@ -253,8 +254,29 @@ class ElfWriterQuick FINAL : public ElfWriter {
bool fatal_error_ = false;
+ // Program header table layout; it immediately follows the ELF header.
+ static const uint32_t PHDR_OFFSET = sizeof(Elf32_Ehdr);
+ enum : uint8_t {
+ PH_PHDR = 0,
+ PH_LOAD_R__ = 1,
+ PH_LOAD_R_X = 2,
+ PH_LOAD_RW_ = 3,
+ PH_DYNAMIC = 4,
+ PH_NUM = 5,
+ };
+ static const uint32_t PHDR_SIZE = sizeof(Elf32_Phdr) * PH_NUM;
+ Elf32_Phdr program_headers_[PH_NUM];
+
Elf32_Ehdr elf_header_;
+ Elf32_Shdr null_hdr_;
+ std::string shstrtab_;
+ uint32_t section_index_;
+ std::string dynstr_;
+ uint32_t dynstr_soname_offset_;
+ std::vector<Elf32_Shdr*> section_ptrs_;
+ std::vector<Elf32_Word> hash_;
+
public:
ElfOatSectionBuilder text_builder_;
ElfOatSectionBuilder rodata_builder_;
@@ -316,7 +338,8 @@ class ElfWriterQuick FINAL : public ElfWriter {
* @param dbg_str Debug strings.
*/
void FillInCFIInformation(OatWriter* oat_writer, std::vector<uint8_t>* dbg_info,
- std::vector<uint8_t>* dbg_abbrev, std::vector<uint8_t>* dbg_str);
+ std::vector<uint8_t>* dbg_abbrev, std::vector<uint8_t>* dbg_str,
+ std::vector<uint8_t>* dbg_line, uint32_t text_section_offset);
DISALLOW_IMPLICIT_CONSTRUCTORS(ElfWriterQuick);
};
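The PH_* enum pins down the five program headers the builder now emits itself, and PHDR_OFFSET works because the program header table sits immediately after the ELF header. A hypothetical sketch of how Init() might fill the PT_PHDR entry (Init() is declared in this diff but its body is not shown):

    #include <elf.h>

    Elf32_Phdr& phdr = program_headers_[PH_PHDR];
    phdr.p_type   = PT_PHDR;
    phdr.p_offset = PHDR_OFFSET;  // == sizeof(Elf32_Ehdr)
    phdr.p_vaddr  = PHDR_OFFSET;
    phdr.p_paddr  = PHDR_OFFSET;
    phdr.p_filesz = PHDR_SIZE;    // == sizeof(Elf32_Phdr) * PH_NUM
    phdr.p_memsz  = PHDR_SIZE;
    phdr.p_flags  = PF_R;
    phdr.p_align  = sizeof(Elf32_Word);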
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index ba7e13f815..9c9cdf2700 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -232,7 +232,7 @@ bool ImageWriter::AllocMemory() {
size_t length = RoundUp(Runtime::Current()->GetHeap()->GetTotalMemory(), kPageSize);
std::string error_msg;
image_.reset(MemMap::MapAnonymous("image writer image", NULL, length, PROT_READ | PROT_WRITE,
- true, &error_msg));
+ false, &error_msg));
if (UNLIKELY(image_.get() == nullptr)) {
LOG(ERROR) << "Failed to allocate memory for image file generation: " << error_msg;
return false;
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index 75d3030baf..a21004c220 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -16,6 +16,8 @@
#include <memory>
+#include <math.h>
+
#include "class_linker.h"
#include "common_compiler_test.h"
#include "dex_file.h"
@@ -46,6 +48,15 @@ namespace art {
class JniCompilerTest : public CommonCompilerTest {
protected:
+ void SetUp() OVERRIDE {
+ CommonCompilerTest::SetUp();
+ check_generic_jni_ = false;
+ }
+
+ void SetCheckGenericJni(bool generic) {
+ check_generic_jni_ = generic;
+ }
+
void CompileForTest(jobject class_loader, bool direct,
const char* method_name, const char* method_sig) {
ScopedObjectAccess soa(Thread::Current());
@@ -61,13 +72,17 @@ class JniCompilerTest : public CommonCompilerTest {
method = c->FindVirtualMethod(method_name, method_sig);
}
ASSERT_TRUE(method != nullptr) << method_name << " " << method_sig;
- if (method->GetEntryPointFromQuickCompiledCode() == nullptr ||
- method->GetEntryPointFromQuickCompiledCode() == class_linker_->GetQuickGenericJniTrampoline()) {
- CompileMethod(method);
- ASSERT_TRUE(method->GetEntryPointFromQuickCompiledCode() != nullptr)
- << method_name << " " << method_sig;
- ASSERT_TRUE(method->GetEntryPointFromPortableCompiledCode() != nullptr)
- << method_name << " " << method_sig;
+ if (check_generic_jni_) {
+ method->SetEntryPointFromQuickCompiledCode(class_linker_->GetQuickGenericJniTrampoline());
+ } else {
+ if (method->GetEntryPointFromQuickCompiledCode() == nullptr ||
+ method->GetEntryPointFromQuickCompiledCode() == class_linker_->GetQuickGenericJniTrampoline()) {
+ CompileMethod(method);
+ ASSERT_TRUE(method->GetEntryPointFromQuickCompiledCode() != nullptr)
+ << method_name << " " << method_sig;
+ ASSERT_TRUE(method->GetEntryPointFromPortableCompiledCode() != nullptr)
+ << method_name << " " << method_sig;
+ }
}
}
@@ -115,16 +130,65 @@ class JniCompilerTest : public CommonCompilerTest {
static jobject jobj_;
static jobject class_loader_;
-
protected:
+ // We have to list the methods here so we can share them between default and generic JNI.
+ void CompileAndRunNoArgMethodImpl();
+ void CompileAndRunIntMethodThroughStubImpl();
+ void CompileAndRunStaticIntMethodThroughStubImpl();
+ void CompileAndRunIntMethodImpl();
+ void CompileAndRunIntIntMethodImpl();
+ void CompileAndRunLongLongMethodImpl();
+ void CompileAndRunDoubleDoubleMethodImpl();
+ void CompileAndRun_fooJJ_synchronizedImpl();
+ void CompileAndRunIntObjectObjectMethodImpl();
+ void CompileAndRunStaticIntIntMethodImpl();
+ void CompileAndRunStaticDoubleDoubleMethodImpl();
+ void RunStaticLogDoubleMethodImpl();
+ void RunStaticLogFloatMethodImpl();
+ void RunStaticReturnTrueImpl();
+ void RunStaticReturnFalseImpl();
+ void RunGenericStaticReturnIntImpl();
+ void CompileAndRunStaticIntObjectObjectMethodImpl();
+ void CompileAndRunStaticSynchronizedIntObjectObjectMethodImpl();
+ void ExceptionHandlingImpl();
+ void NativeStackTraceElementImpl();
+ void ReturnGlobalRefImpl();
+ void LocalReferenceTableClearingTestImpl();
+ void JavaLangSystemArrayCopyImpl();
+ void CompareAndSwapIntImpl();
+ void GetTextImpl();
+ void GetSinkPropertiesNativeImpl();
+ void UpcallReturnTypeChecking_InstanceImpl();
+ void UpcallReturnTypeChecking_StaticImpl();
+ void UpcallArgumentTypeChecking_InstanceImpl();
+ void UpcallArgumentTypeChecking_StaticImpl();
+ void CompileAndRunFloatFloatMethodImpl();
+ void CheckParameterAlignImpl();
+ void MaxParamNumberImpl();
+ void WithoutImplementationImpl();
+ void StackArgsIntsFirstImpl();
+ void StackArgsFloatsFirstImpl();
+ void StackArgsMixedImpl();
+
JNIEnv* env_;
jmethodID jmethod_;
+ bool check_generic_jni_;
};
jclass JniCompilerTest::jklass_;
jobject JniCompilerTest::jobj_;
jobject JniCompilerTest::class_loader_;
+#define JNI_TEST(TestName) \
+ TEST_F(JniCompilerTest, TestName ## Default) { \
+ TestName ## Impl(); \
+ } \
+ \
+ TEST_F(JniCompilerTest, TestName ## Generic) { \
+ TEST_DISABLED_FOR_MIPS(); \
+ SetCheckGenericJni(true); \
+ TestName ## Impl(); \
+ }
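Each JNI_TEST(X) stamps out two gtest cases sharing one XImpl body: a Default case using the normally compiled JNI stub, and a Generic case that forces the generic JNI trampoline. For example, JNI_TEST(CompileAndRunNoArgMethod) expands to:

    TEST_F(JniCompilerTest, CompileAndRunNoArgMethodDefault) {
      CompileAndRunNoArgMethodImpl();
    }

    TEST_F(JniCompilerTest, CompileAndRunNoArgMethodGeneric) {
      TEST_DISABLED_FOR_MIPS();
      SetCheckGenericJni(true);
      CompileAndRunNoArgMethodImpl();
    }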
int gJava_MyClassNatives_foo_calls = 0;
void Java_MyClassNatives_foo(JNIEnv* env, jobject thisObj) {
@@ -139,7 +203,7 @@ void Java_MyClassNatives_foo(JNIEnv* env, jobject thisObj) {
EXPECT_EQ(1U, Thread::Current()->NumStackReferences());
}
-TEST_F(JniCompilerTest, CompileAndRunNoArgMethod) {
+void JniCompilerTest::CompileAndRunNoArgMethodImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "foo", "()V", reinterpret_cast<void*>(&Java_MyClassNatives_foo));
@@ -148,9 +212,13 @@ TEST_F(JniCompilerTest, CompileAndRunNoArgMethod) {
EXPECT_EQ(1, gJava_MyClassNatives_foo_calls);
env_->CallNonvirtualVoidMethod(jobj_, jklass_, jmethod_);
EXPECT_EQ(2, gJava_MyClassNatives_foo_calls);
+
+ gJava_MyClassNatives_foo_calls = 0;
}
-TEST_F(JniCompilerTest, CompileAndRunIntMethodThroughStub) {
+JNI_TEST(CompileAndRunNoArgMethod)
+
+void JniCompilerTest::CompileAndRunIntMethodThroughStubImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "bar", "(I)I", nullptr);
// calling through stub will link with &Java_MyClassNatives_bar
@@ -163,7 +231,9 @@ TEST_F(JniCompilerTest, CompileAndRunIntMethodThroughStub) {
EXPECT_EQ(25, result);
}
-TEST_F(JniCompilerTest, CompileAndRunStaticIntMethodThroughStub) {
+JNI_TEST(CompileAndRunIntMethodThroughStub)
+
+void JniCompilerTest::CompileAndRunStaticIntMethodThroughStubImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(true, "sbar", "(I)I", nullptr);
// calling through stub will link with &Java_MyClassNatives_sbar
@@ -176,6 +246,8 @@ TEST_F(JniCompilerTest, CompileAndRunStaticIntMethodThroughStub) {
EXPECT_EQ(43, result);
}
+JNI_TEST(CompileAndRunStaticIntMethodThroughStub)
+
int gJava_MyClassNatives_fooI_calls = 0;
jint Java_MyClassNatives_fooI(JNIEnv* env, jobject thisObj, jint x) {
// 1 = thisObj
@@ -189,7 +261,7 @@ jint Java_MyClassNatives_fooI(JNIEnv* env, jobject thisObj, jint x) {
return x;
}
-TEST_F(JniCompilerTest, CompileAndRunIntMethod) {
+void JniCompilerTest::CompileAndRunIntMethodImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "fooI", "(I)I",
reinterpret_cast<void*>(&Java_MyClassNatives_fooI));
@@ -201,8 +273,12 @@ TEST_F(JniCompilerTest, CompileAndRunIntMethod) {
result = env_->CallNonvirtualIntMethod(jobj_, jklass_, jmethod_, 0xCAFED00D);
EXPECT_EQ(static_cast<jint>(0xCAFED00D), result);
EXPECT_EQ(2, gJava_MyClassNatives_fooI_calls);
+
+ gJava_MyClassNatives_fooI_calls = 0;
}
+JNI_TEST(CompileAndRunIntMethod)
+
int gJava_MyClassNatives_fooII_calls = 0;
jint Java_MyClassNatives_fooII(JNIEnv* env, jobject thisObj, jint x, jint y) {
// 1 = thisObj
@@ -216,7 +292,7 @@ jint Java_MyClassNatives_fooII(JNIEnv* env, jobject thisObj, jint x, jint y) {
return x - y; // non-commutative operator
}
-TEST_F(JniCompilerTest, CompileAndRunIntIntMethod) {
+void JniCompilerTest::CompileAndRunIntIntMethodImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "fooII", "(II)I",
reinterpret_cast<void*>(&Java_MyClassNatives_fooII));
@@ -229,8 +305,12 @@ TEST_F(JniCompilerTest, CompileAndRunIntIntMethod) {
0xCAFED00D);
EXPECT_EQ(static_cast<jint>(0xCAFEBABE - 0xCAFED00D), result);
EXPECT_EQ(2, gJava_MyClassNatives_fooII_calls);
+
+ gJava_MyClassNatives_fooII_calls = 0;
}
+JNI_TEST(CompileAndRunIntIntMethod)
+
int gJava_MyClassNatives_fooJJ_calls = 0;
jlong Java_MyClassNatives_fooJJ(JNIEnv* env, jobject thisObj, jlong x, jlong y) {
// 1 = thisObj
@@ -244,7 +324,7 @@ jlong Java_MyClassNatives_fooJJ(JNIEnv* env, jobject thisObj, jlong x, jlong y)
return x - y; // non-commutative operator
}
-TEST_F(JniCompilerTest, CompileAndRunLongLongMethod) {
+void JniCompilerTest::CompileAndRunLongLongMethodImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "fooJJ", "(JJ)J",
reinterpret_cast<void*>(&Java_MyClassNatives_fooJJ));
@@ -258,8 +338,12 @@ TEST_F(JniCompilerTest, CompileAndRunLongLongMethod) {
result = env_->CallNonvirtualLongMethod(jobj_, jklass_, jmethod_, b, a);
EXPECT_EQ(b - a, result);
EXPECT_EQ(2, gJava_MyClassNatives_fooJJ_calls);
+
+ gJava_MyClassNatives_fooJJ_calls = 0;
}
+JNI_TEST(CompileAndRunLongLongMethod)
+
int gJava_MyClassNatives_fooDD_calls = 0;
jdouble Java_MyClassNatives_fooDD(JNIEnv* env, jobject thisObj, jdouble x, jdouble y) {
// 1 = thisObj
@@ -273,7 +357,7 @@ jdouble Java_MyClassNatives_fooDD(JNIEnv* env, jobject thisObj, jdouble x, jdoub
return x - y; // non-commutative operator
}
-TEST_F(JniCompilerTest, CompileAndRunDoubleDoubleMethod) {
+void JniCompilerTest::CompileAndRunDoubleDoubleMethodImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "fooDD", "(DD)D",
reinterpret_cast<void*>(&Java_MyClassNatives_fooDD));
@@ -288,6 +372,8 @@ TEST_F(JniCompilerTest, CompileAndRunDoubleDoubleMethod) {
result = env_->CallNonvirtualDoubleMethod(jobj_, jklass_, jmethod_, a, b);
EXPECT_EQ(a - b, result);
EXPECT_EQ(2, gJava_MyClassNatives_fooDD_calls);
+
+ gJava_MyClassNatives_fooDD_calls = 0;
}
int gJava_MyClassNatives_fooJJ_synchronized_calls = 0;
@@ -303,7 +389,7 @@ jlong Java_MyClassNatives_fooJJ_synchronized(JNIEnv* env, jobject thisObj, jlong
return x | y;
}
-TEST_F(JniCompilerTest, CompileAndRun_fooJJ_synchronized) {
+void JniCompilerTest::CompileAndRun_fooJJ_synchronizedImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "fooJJ_synchronized", "(JJ)J",
reinterpret_cast<void*>(&Java_MyClassNatives_fooJJ_synchronized));
@@ -314,8 +400,12 @@ TEST_F(JniCompilerTest, CompileAndRun_fooJJ_synchronized) {
jlong result = env_->CallNonvirtualLongMethod(jobj_, jklass_, jmethod_, a, b);
EXPECT_EQ(a | b, result);
EXPECT_EQ(1, gJava_MyClassNatives_fooJJ_synchronized_calls);
+
+ gJava_MyClassNatives_fooJJ_synchronized_calls = 0;
}
+JNI_TEST(CompileAndRun_fooJJ_synchronized)
+
int gJava_MyClassNatives_fooIOO_calls = 0;
jobject Java_MyClassNatives_fooIOO(JNIEnv* env, jobject thisObj, jint x, jobject y,
jobject z) {
@@ -339,7 +429,7 @@ jobject Java_MyClassNatives_fooIOO(JNIEnv* env, jobject thisObj, jint x, jobject
}
}
-TEST_F(JniCompilerTest, CompileAndRunIntObjectObjectMethod) {
+void JniCompilerTest::CompileAndRunIntObjectObjectMethodImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "fooIOO",
"(ILjava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;",
@@ -369,8 +459,12 @@ TEST_F(JniCompilerTest, CompileAndRunIntObjectObjectMethod) {
result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 2, jklass_, nullptr);
EXPECT_TRUE(env_->IsSameObject(nullptr, result));
EXPECT_EQ(7, gJava_MyClassNatives_fooIOO_calls);
+
+ gJava_MyClassNatives_fooIOO_calls = 0;
}
+JNI_TEST(CompileAndRunIntObjectObjectMethod)
+
int gJava_MyClassNatives_fooSII_calls = 0;
jint Java_MyClassNatives_fooSII(JNIEnv* env, jclass klass, jint x, jint y) {
// 1 = klass
@@ -384,7 +478,7 @@ jint Java_MyClassNatives_fooSII(JNIEnv* env, jclass klass, jint x, jint y) {
return x + y;
}
-TEST_F(JniCompilerTest, CompileAndRunStaticIntIntMethod) {
+void JniCompilerTest::CompileAndRunStaticIntIntMethodImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(true, "fooSII", "(II)I",
reinterpret_cast<void*>(&Java_MyClassNatives_fooSII));
@@ -393,8 +487,12 @@ TEST_F(JniCompilerTest, CompileAndRunStaticIntIntMethod) {
jint result = env_->CallStaticIntMethod(jklass_, jmethod_, 20, 30);
EXPECT_EQ(50, result);
EXPECT_EQ(1, gJava_MyClassNatives_fooSII_calls);
+
+ gJava_MyClassNatives_fooSII_calls = 0;
}
+JNI_TEST(CompileAndRunStaticIntIntMethod)
+
int gJava_MyClassNatives_fooSDD_calls = 0;
jdouble Java_MyClassNatives_fooSDD(JNIEnv* env, jclass klass, jdouble x, jdouble y) {
// 1 = klass
@@ -408,7 +506,7 @@ jdouble Java_MyClassNatives_fooSDD(JNIEnv* env, jclass klass, jdouble x, jdouble
return x - y; // non-commutative operator
}
-TEST_F(JniCompilerTest, CompileAndRunStaticDoubleDoubleMethod) {
+void JniCompilerTest::CompileAndRunStaticDoubleDoubleMethodImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(true, "fooSDD", "(DD)D",
reinterpret_cast<void*>(&Java_MyClassNatives_fooSDD));
@@ -422,8 +520,87 @@ TEST_F(JniCompilerTest, CompileAndRunStaticDoubleDoubleMethod) {
result = env_->CallStaticDoubleMethod(jklass_, jmethod_, a, b);
EXPECT_EQ(a - b, result);
EXPECT_EQ(2, gJava_MyClassNatives_fooSDD_calls);
+
+ gJava_MyClassNatives_fooSDD_calls = 0;
+}
+
+JNI_TEST(CompileAndRunStaticDoubleDoubleMethod)
+
+// The x86 generic JNI code had a bug where it assumed a floating-point
+// return value would be in xmm0. We use log() to make it likely that the
+// compiler returns the value on the x87 floating-point stack.
+
+jdouble Java_MyClassNatives_logD(JNIEnv* env, jclass klass, jdouble x) {
+ return log(x);
+}
+
+void JniCompilerTest::RunStaticLogDoubleMethodImpl() {
+ TEST_DISABLED_FOR_PORTABLE();
+ SetUpForTest(true, "logD", "(D)D", reinterpret_cast<void*>(&Java_MyClassNatives_logD));
+
+ jdouble result = env_->CallStaticDoubleMethod(jklass_, jmethod_, 2.0);
+ EXPECT_EQ(log(2.0), result);
+}
+
+JNI_TEST(RunStaticLogDoubleMethod)
+
+jfloat Java_MyClassNatives_logF(JNIEnv* env, jclass klass, jfloat x) {
+ return logf(x);
+}
+
+void JniCompilerTest::RunStaticLogFloatMethodImpl() {
+ TEST_DISABLED_FOR_PORTABLE();
+ SetUpForTest(true, "logF", "(F)F", reinterpret_cast<void*>(&Java_MyClassNatives_logF));
+
+ jfloat result = env_->CallStaticFloatMethod(jklass_, jmethod_, 2.0);
+ EXPECT_EQ(logf(2.0), result);
+}
+
+JNI_TEST(RunStaticLogFloatMethod)
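Background for these two tests: the 32-bit x86 native calling convention returns floating-point values in st(0) on the x87 stack, not in xmm0, so a trampoline that blindly reads xmm0 returns garbage. Moving the result takes an explicit spill, roughly (illustrative only, not the actual trampoline code):

    fstps (%esp)           // pop st(0) to memory (fstpl for a double)
    movss (%esp), %xmm0    // reload it where the caller expects it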
+
+jboolean Java_MyClassNatives_returnTrue(JNIEnv* env, jclass klass) {
+ return JNI_TRUE;
}
+jboolean Java_MyClassNatives_returnFalse(JNIEnv* env, jclass klass) {
+ return JNI_FALSE;
+}
+
+jint Java_MyClassNatives_returnInt(JNIEnv* env, jclass klass) {
+ return 42;
+}
+
+void JniCompilerTest::RunStaticReturnTrueImpl() {
+ TEST_DISABLED_FOR_PORTABLE();
+ SetUpForTest(true, "returnTrue", "()Z", reinterpret_cast<void*>(&Java_MyClassNatives_returnTrue));
+
+ jboolean result = env_->CallStaticBooleanMethod(jklass_, jmethod_);
+ EXPECT_TRUE(result);
+}
+
+JNI_TEST(RunStaticReturnTrue)
+
+void JniCompilerTest::RunStaticReturnFalseImpl() {
+ TEST_DISABLED_FOR_PORTABLE();
+ SetUpForTest(true, "returnFalse", "()Z",
+ reinterpret_cast<void*>(&Java_MyClassNatives_returnFalse));
+
+ jboolean result = env_->CallStaticBooleanMethod(jklass_, jmethod_);
+ EXPECT_FALSE(result);
+}
+
+JNI_TEST(RunStaticReturnFalse)
+
+void JniCompilerTest::RunGenericStaticReturnIntImpl() {
+ TEST_DISABLED_FOR_PORTABLE();
+ SetUpForTest(true, "returnInt", "()I", reinterpret_cast<void*>(&Java_MyClassNatives_returnInt));
+
+ jint result = env_->CallStaticIntMethod(jklass_, jmethod_);
+ EXPECT_EQ(42, result);
+}
+
+JNI_TEST(RunGenericStaticReturnInt)
+
int gJava_MyClassNatives_fooSIOO_calls = 0;
jobject Java_MyClassNatives_fooSIOO(JNIEnv* env, jclass klass, jint x, jobject y,
jobject z) {
@@ -448,7 +625,7 @@ jobject Java_MyClassNatives_fooSIOO(JNIEnv* env, jclass klass, jint x, jobject y
}
-TEST_F(JniCompilerTest, CompileAndRunStaticIntObjectObjectMethod) {
+void JniCompilerTest::CompileAndRunStaticIntObjectObjectMethodImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(true, "fooSIOO",
"(ILjava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;",
@@ -478,8 +655,12 @@ TEST_F(JniCompilerTest, CompileAndRunStaticIntObjectObjectMethod) {
result = env_->CallStaticObjectMethod(jklass_, jmethod_, 2, jobj_, nullptr);
EXPECT_TRUE(env_->IsSameObject(nullptr, result));
EXPECT_EQ(7, gJava_MyClassNatives_fooSIOO_calls);
+
+ gJava_MyClassNatives_fooSIOO_calls = 0;
}
+JNI_TEST(CompileAndRunStaticIntObjectObjectMethod)
+
int gJava_MyClassNatives_fooSSIOO_calls = 0;
jobject Java_MyClassNatives_fooSSIOO(JNIEnv* env, jclass klass, jint x, jobject y, jobject z) {
// 3 = klass + y + z
@@ -502,7 +683,7 @@ jobject Java_MyClassNatives_fooSSIOO(JNIEnv* env, jclass klass, jint x, jobject
}
}
-TEST_F(JniCompilerTest, CompileAndRunStaticSynchronizedIntObjectObjectMethod) {
+void JniCompilerTest::CompileAndRunStaticSynchronizedIntObjectObjectMethodImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(true, "fooSSIOO",
"(ILjava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;",
@@ -532,14 +713,18 @@ TEST_F(JniCompilerTest, CompileAndRunStaticSynchronizedIntObjectObjectMethod) {
result = env_->CallStaticObjectMethod(jklass_, jmethod_, 2, jobj_, nullptr);
EXPECT_TRUE(env_->IsSameObject(nullptr, result));
EXPECT_EQ(7, gJava_MyClassNatives_fooSSIOO_calls);
+
+ gJava_MyClassNatives_fooSSIOO_calls = 0;
}
+JNI_TEST(CompileAndRunStaticSynchronizedIntObjectObjectMethod)
+
void Java_MyClassNatives_throwException(JNIEnv* env, jobject) {
jclass c = env->FindClass("java/lang/RuntimeException");
env->ThrowNew(c, "hello");
}
-TEST_F(JniCompilerTest, ExceptionHandling) {
+void JniCompilerTest::ExceptionHandlingImpl() {
TEST_DISABLED_FOR_PORTABLE();
{
ASSERT_FALSE(runtime_->IsStarted());
@@ -580,8 +765,12 @@ TEST_F(JniCompilerTest, ExceptionHandling) {
SetUpForTest(false, "foo", "()V", reinterpret_cast<void*>(&Java_MyClassNatives_foo));
env_->CallNonvirtualVoidMethod(jobj_, jklass_, jmethod_);
EXPECT_EQ(2, gJava_MyClassNatives_foo_calls);
+
+ gJava_MyClassNatives_foo_calls = 0;
}
+JNI_TEST(ExceptionHandling)
+
jint Java_MyClassNatives_nativeUpCall(JNIEnv* env, jobject thisObj, jint i) {
if (i <= 0) {
// We want to check raw Object* / Array* below
@@ -620,7 +809,7 @@ jint Java_MyClassNatives_nativeUpCall(JNIEnv* env, jobject thisObj, jint i) {
}
}
-TEST_F(JniCompilerTest, NativeStackTraceElement) {
+void JniCompilerTest::NativeStackTraceElementImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "fooI", "(I)I",
reinterpret_cast<void*>(&Java_MyClassNatives_nativeUpCall));
@@ -628,11 +817,13 @@ TEST_F(JniCompilerTest, NativeStackTraceElement) {
EXPECT_EQ(10+9+8+7+6+5+4+3+2+1, result);
}
+JNI_TEST(NativeStackTraceElement)
+
jobject Java_MyClassNatives_fooO(JNIEnv* env, jobject, jobject x) {
return env->NewGlobalRef(x);
}
-TEST_F(JniCompilerTest, ReturnGlobalRef) {
+void JniCompilerTest::ReturnGlobalRefImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "fooO", "(Ljava/lang/Object;)Ljava/lang/Object;",
reinterpret_cast<void*>(&Java_MyClassNatives_fooO));
@@ -641,6 +832,8 @@ TEST_F(JniCompilerTest, ReturnGlobalRef) {
EXPECT_TRUE(env_->IsSameObject(result, jobj_));
}
+JNI_TEST(ReturnGlobalRef)
+
jint local_ref_test(JNIEnv* env, jobject thisObj, jint x) {
// Add 10 local references
ScopedObjectAccess soa(env);
@@ -650,7 +843,7 @@ jint local_ref_test(JNIEnv* env, jobject thisObj, jint x) {
return x+1;
}
-TEST_F(JniCompilerTest, LocalReferenceTableClearingTest) {
+void JniCompilerTest::LocalReferenceTableClearingTestImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "fooI", "(I)I", reinterpret_cast<void*>(&local_ref_test));
// 1000 invocations of a method that adds 10 local references
@@ -660,6 +853,8 @@ TEST_F(JniCompilerTest, LocalReferenceTableClearingTest) {
}
}
+JNI_TEST(LocalReferenceTableClearingTest)
+
void my_arraycopy(JNIEnv* env, jclass klass, jobject src, jint src_pos, jobject dst, jint dst_pos, jint length) {
EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jklass_, klass));
EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jklass_, dst));
@@ -669,13 +864,15 @@ void my_arraycopy(JNIEnv* env, jclass klass, jobject src, jint src_pos, jobject
EXPECT_EQ(9876, length);
}
-TEST_F(JniCompilerTest, JavaLangSystemArrayCopy) {
+void JniCompilerTest::JavaLangSystemArrayCopyImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(true, "arraycopy", "(Ljava/lang/Object;ILjava/lang/Object;II)V",
reinterpret_cast<void*>(&my_arraycopy));
env_->CallStaticVoidMethod(jklass_, jmethod_, jobj_, 1234, jklass_, 5678, 9876);
}
+JNI_TEST(JavaLangSystemArrayCopy)
+
jboolean my_casi(JNIEnv* env, jobject unsafe, jobject obj, jlong offset, jint expected, jint newval) {
EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jobj_, unsafe));
EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jobj_, obj));
@@ -685,7 +882,7 @@ jboolean my_casi(JNIEnv* env, jobject unsafe, jobject obj, jlong offset, jint ex
return JNI_TRUE;
}
-TEST_F(JniCompilerTest, CompareAndSwapInt) {
+void JniCompilerTest::CompareAndSwapIntImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "compareAndSwapInt", "(Ljava/lang/Object;JII)Z",
reinterpret_cast<void*>(&my_casi));
@@ -694,6 +891,8 @@ TEST_F(JniCompilerTest, CompareAndSwapInt) {
EXPECT_EQ(result, JNI_TRUE);
}
+JNI_TEST(CompareAndSwapInt)
+
jint my_gettext(JNIEnv* env, jclass klass, jlong val1, jobject obj1, jlong val2, jobject obj2) {
EXPECT_TRUE(env->IsInstanceOf(JniCompilerTest::jobj_, klass));
EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jobj_, obj1));
@@ -703,7 +902,7 @@ jint my_gettext(JNIEnv* env, jclass klass, jlong val1, jobject obj1, jlong val2,
return 42;
}
-TEST_F(JniCompilerTest, GetText) {
+void JniCompilerTest::GetTextImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(true, "getText", "(JLjava/lang/Object;JLjava/lang/Object;)I",
reinterpret_cast<void*>(&my_gettext));
@@ -712,6 +911,8 @@ TEST_F(JniCompilerTest, GetText) {
EXPECT_EQ(result, 42);
}
+JNI_TEST(GetText)
+
int gJava_MyClassNatives_GetSinkProperties_calls = 0;
jarray Java_MyClassNatives_GetSinkProperties(JNIEnv* env, jobject thisObj, jstring s) {
// 1 = thisObj
@@ -729,7 +930,7 @@ jarray Java_MyClassNatives_GetSinkProperties(JNIEnv* env, jobject thisObj, jstri
return nullptr;
}
-TEST_F(JniCompilerTest, GetSinkPropertiesNative) {
+void JniCompilerTest::GetSinkPropertiesNativeImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "getSinkPropertiesNative", "(Ljava/lang/String;)[Ljava/lang/Object;",
reinterpret_cast<void*>(&Java_MyClassNatives_GetSinkProperties));
@@ -739,8 +940,12 @@ TEST_F(JniCompilerTest, GetSinkPropertiesNative) {
env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, nullptr));
EXPECT_EQ(nullptr, result);
EXPECT_EQ(1, gJava_MyClassNatives_GetSinkProperties_calls);
+
+ gJava_MyClassNatives_GetSinkProperties_calls = 0;
}
+JNI_TEST(GetSinkPropertiesNative)
+
// This should return jclass, but we're imitating a bug pattern.
jobject Java_MyClassNatives_instanceMethodThatShouldReturnClass(JNIEnv* env, jobject) {
return env->NewStringUTF("not a class!");
@@ -751,7 +956,7 @@ jobject Java_MyClassNatives_staticMethodThatShouldReturnClass(JNIEnv* env, jclas
return env->NewStringUTF("not a class!");
}
-TEST_F(JniCompilerTest, UpcallReturnTypeChecking_Instance) {
+void JniCompilerTest::UpcallReturnTypeChecking_InstanceImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "instanceMethodThatShouldReturnClass", "()Ljava/lang/Class;",
reinterpret_cast<void*>(&Java_MyClassNatives_instanceMethodThatShouldReturnClass));
@@ -769,7 +974,9 @@ TEST_F(JniCompilerTest, UpcallReturnTypeChecking_Instance) {
check_jni_abort_catcher.Check("calling non-static method java.lang.Class MyClassNatives.instanceMethodThatShouldReturnClass() with CallStaticObjectMethodV");
}
-TEST_F(JniCompilerTest, UpcallReturnTypeChecking_Static) {
+JNI_TEST(UpcallReturnTypeChecking_Instance)
+
+void JniCompilerTest::UpcallReturnTypeChecking_StaticImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(true, "staticMethodThatShouldReturnClass", "()Ljava/lang/Class;",
reinterpret_cast<void*>(&Java_MyClassNatives_staticMethodThatShouldReturnClass));
@@ -787,6 +994,8 @@ TEST_F(JniCompilerTest, UpcallReturnTypeChecking_Static) {
check_jni_abort_catcher.Check("calling static method java.lang.Class MyClassNatives.staticMethodThatShouldReturnClass() with CallObjectMethodV");
}
+JNI_TEST(UpcallReturnTypeChecking_Static)
+
// This should take jclass, but we're imitating a bug pattern.
void Java_MyClassNatives_instanceMethodThatShouldTakeClass(JNIEnv*, jobject, jclass) {
}
@@ -795,7 +1004,7 @@ void Java_MyClassNatives_instanceMethodThatShouldTakeClass(JNIEnv*, jobject, jcl
void Java_MyClassNatives_staticMethodThatShouldTakeClass(JNIEnv*, jclass, jclass) {
}
-TEST_F(JniCompilerTest, UpcallArgumentTypeChecking_Instance) {
+void JniCompilerTest::UpcallArgumentTypeChecking_InstanceImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "instanceMethodThatShouldTakeClass", "(ILjava/lang/Class;)V",
reinterpret_cast<void*>(&Java_MyClassNatives_instanceMethodThatShouldTakeClass));
@@ -806,7 +1015,9 @@ TEST_F(JniCompilerTest, UpcallArgumentTypeChecking_Instance) {
check_jni_abort_catcher.Check("bad arguments passed to void MyClassNatives.instanceMethodThatShouldTakeClass(int, java.lang.Class)");
}
-TEST_F(JniCompilerTest, UpcallArgumentTypeChecking_Static) {
+JNI_TEST(UpcallArgumentTypeChecking_Instance)
+
+void JniCompilerTest::UpcallArgumentTypeChecking_StaticImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(true, "staticMethodThatShouldTakeClass", "(ILjava/lang/Class;)V",
reinterpret_cast<void*>(&Java_MyClassNatives_staticMethodThatShouldTakeClass));
@@ -817,6 +1028,8 @@ TEST_F(JniCompilerTest, UpcallArgumentTypeChecking_Static) {
check_jni_abort_catcher.Check("bad arguments passed to void MyClassNatives.staticMethodThatShouldTakeClass(int, java.lang.Class)");
}
+JNI_TEST(UpcallArgumentTypeChecking_Static)
+
jfloat Java_MyClassNatives_checkFloats(JNIEnv* env, jobject thisObj, jfloat f1, jfloat f2) {
EXPECT_EQ(kNative, Thread::Current()->GetState());
EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
@@ -827,7 +1040,7 @@ jfloat Java_MyClassNatives_checkFloats(JNIEnv* env, jobject thisObj, jfloat f1,
return f1 - f2; // non-commutative operator
}
-TEST_F(JniCompilerTest, CompileAndRunFloatFloatMethod) {
+void JniCompilerTest::CompileAndRunFloatFloatMethodImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "checkFloats", "(FF)F",
reinterpret_cast<void*>(&Java_MyClassNatives_checkFloats));
@@ -841,6 +1054,8 @@ TEST_F(JniCompilerTest, CompileAndRunFloatFloatMethod) {
EXPECT_EQ(a - b, result);
}
+JNI_TEST(CompileAndRunFloatFloatMethod)
+
void Java_MyClassNatives_checkParameterAlign(JNIEnv* env, jobject thisObj, jint i1, jlong l1) {
// EXPECT_EQ(kNative, Thread::Current()->GetState());
// EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
@@ -852,7 +1067,7 @@ void Java_MyClassNatives_checkParameterAlign(JNIEnv* env, jobject thisObj, jint
EXPECT_EQ(l1, INT64_C(0x12345678ABCDEF0));
}
-TEST_F(JniCompilerTest, CheckParameterAlign) {
+void JniCompilerTest::CheckParameterAlignImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "checkParameterAlign", "(IJ)V",
reinterpret_cast<void*>(&Java_MyClassNatives_checkParameterAlign));
@@ -860,6 +1075,8 @@ TEST_F(JniCompilerTest, CheckParameterAlign) {
env_->CallNonvirtualVoidMethod(jobj_, jklass_, jmethod_, 1234, INT64_C(0x12345678ABCDEF0));
}
+JNI_TEST(CheckParameterAlign)
+
void Java_MyClassNatives_maxParamNumber(JNIEnv* env, jobject thisObj,
jobject o0, jobject o1, jobject o2, jobject o3, jobject o4, jobject o5, jobject o6, jobject o7,
jobject o8, jobject o9, jobject o10, jobject o11, jobject o12, jobject o13, jobject o14, jobject o15,
@@ -1265,7 +1482,7 @@ const char* longSig =
"Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;"
"Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;)V";
-TEST_F(JniCompilerTest, MaxParamNumber) {
+void JniCompilerTest::MaxParamNumberImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "maxParamNumber", longSig,
reinterpret_cast<void*>(&Java_MyClassNatives_maxParamNumber));
@@ -1289,7 +1506,9 @@ TEST_F(JniCompilerTest, MaxParamNumber) {
env_->CallNonvirtualVoidMethodA(jobj_, jklass_, jmethod_, args);
}
-TEST_F(JniCompilerTest, WithoutImplementation) {
+JNI_TEST(MaxParamNumber)
+
+void JniCompilerTest::WithoutImplementationImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "withoutImplementation", "()V", nullptr);
@@ -1299,6 +1518,8 @@ TEST_F(JniCompilerTest, WithoutImplementation) {
EXPECT_TRUE(env_->ExceptionCheck() == JNI_TRUE);
}
+JNI_TEST(WithoutImplementation)
+
void Java_MyClassNatives_stackArgsIntsFirst(JNIEnv* env, jclass klass, jint i1, jint i2, jint i3,
jint i4, jint i5, jint i6, jint i7, jint i8, jint i9,
jint i10, jfloat f1, jfloat f2, jfloat f3, jfloat f4,
@@ -1337,7 +1558,7 @@ void Java_MyClassNatives_stackArgsIntsFirst(JNIEnv* env, jclass klass, jint i1,
EXPECT_EQ(i20, 20);
}
-TEST_F(JniCompilerTest, StackArgsIntsFirst) {
+void JniCompilerTest::StackArgsIntsFirstImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(true, "stackArgsIntsFirst", "(IIIIIIIIIIFFFFFFFFFF)V",
reinterpret_cast<void*>(&Java_MyClassNatives_stackArgsIntsFirst));
@@ -1368,6 +1589,8 @@ TEST_F(JniCompilerTest, StackArgsIntsFirst) {
f3, f4, f5, f6, f7, f8, f9, f10);
}
+JNI_TEST(StackArgsIntsFirst)
+
void Java_MyClassNatives_stackArgsFloatsFirst(JNIEnv* env, jclass klass, jfloat f1, jfloat f2,
jfloat f3, jfloat f4, jfloat f5, jfloat f6, jfloat f7,
jfloat f8, jfloat f9, jfloat f10, jint i1, jint i2,
@@ -1406,7 +1629,7 @@ void Java_MyClassNatives_stackArgsFloatsFirst(JNIEnv* env, jclass klass, jfloat
EXPECT_EQ(i20, 20);
}
-TEST_F(JniCompilerTest, StackArgsFloatsFirst) {
+void JniCompilerTest::StackArgsFloatsFirstImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(true, "stackArgsFloatsFirst", "(FFFFFFFFFFIIIIIIIIII)V",
reinterpret_cast<void*>(&Java_MyClassNatives_stackArgsFloatsFirst));
@@ -1437,6 +1660,8 @@ TEST_F(JniCompilerTest, StackArgsFloatsFirst) {
i4, i5, i6, i7, i8, i9, i10);
}
+JNI_TEST(StackArgsFloatsFirst)
+
void Java_MyClassNatives_stackArgsMixed(JNIEnv* env, jclass klass, jint i1, jfloat f1, jint i2,
jfloat f2, jint i3, jfloat f3, jint i4, jfloat f4, jint i5,
jfloat f5, jint i6, jfloat f6, jint i7, jfloat f7, jint i8,
@@ -1474,7 +1699,7 @@ void Java_MyClassNatives_stackArgsMixed(JNIEnv* env, jclass klass, jint i1, jflo
EXPECT_EQ(i20, 20);
}
-TEST_F(JniCompilerTest, StackArgsMixed) {
+void JniCompilerTest::StackArgsMixedImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(true, "stackArgsMixed", "(IFIFIFIFIFIFIFIFIFIF)V",
reinterpret_cast<void*>(&Java_MyClassNatives_stackArgsMixed));
@@ -1505,4 +1730,6 @@ TEST_F(JniCompilerTest, StackArgsMixed) {
f7, i8, f8, i9, f9, i10, f10);
}
+JNI_TEST(StackArgsMixed)
+
} // namespace art
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 1ba5d3218e..41a91ed9d3 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -18,6 +18,7 @@
#include <zlib.h>
+#include "base/allocator.h"
#include "base/bit_vector.h"
#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
@@ -411,10 +412,12 @@ class OatWriter::InitCodeMethodVisitor : public OatDexMethodVisitor {
const uint32_t quick_code_start = quick_code_offset -
writer_->oat_header_->GetExecutableOffset();
+ const DexFile::CodeItem* code_item = it.GetMethodCodeItem();
writer_->method_info_.push_back(DebugInfo(name,
- quick_code_start,
- quick_code_start + code_size,
- compiled_method));
+ dex_file_->GetSourceFile(dex_file_->GetClassDef(class_def_index_)),
+ quick_code_start, quick_code_start + code_size,
+ code_item == nullptr ? nullptr : dex_file_->GetDebugInfoStream(code_item),
+ compiled_method));
}
}
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index ef5fd6b1c2..11f8bffd11 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -98,14 +98,18 @@ class OatWriter {
~OatWriter();
struct DebugInfo {
- DebugInfo(const std::string& method_name, uint32_t low_pc, uint32_t high_pc,
+ DebugInfo(const std::string& method_name, const char* src_file_name,
+ uint32_t low_pc, uint32_t high_pc, const uint8_t* dbgstream,
CompiledMethod* compiled_method)
- : method_name_(method_name), low_pc_(low_pc), high_pc_(high_pc),
+ : method_name_(method_name), src_file_name_(src_file_name),
+ low_pc_(low_pc), high_pc_(high_pc), dbgstream_(dbgstream),
compiled_method_(compiled_method) {
}
std::string method_name_; // Note: this name is a pretty-printed name.
+ const char* src_file_name_;
uint32_t low_pc_;
uint32_t high_pc_;
+ const uint8_t* dbgstream_;
CompiledMethod* compiled_method_;
};
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index bd8c27ec3e..7269fff62c 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -19,6 +19,7 @@
#include "code_generator_arm.h"
#include "code_generator_x86.h"
#include "code_generator_x86_64.h"
+#include "compiled_method.h"
#include "dex/verified_method.h"
#include "driver/dex_compilation_unit.h"
#include "gc_map_builder.h"
@@ -297,7 +298,7 @@ void CodeGenerator::BuildNativeGCMap(
}
}
-void CodeGenerator::BuildMappingTable(std::vector<uint8_t>* data) const {
+void CodeGenerator::BuildMappingTable(std::vector<uint8_t>* data, SrcMap* src_map) const {
uint32_t pc2dex_data_size = 0u;
uint32_t pc2dex_entries = pc_infos_.Size();
uint32_t pc2dex_offset = 0u;
@@ -305,6 +306,10 @@ void CodeGenerator::BuildMappingTable(std::vector<uint8_t>* data) const {
uint32_t dex2pc_data_size = 0u;
uint32_t dex2pc_entries = 0u;
+ if (src_map != nullptr) {
+ src_map->reserve(pc2dex_entries);
+ }
+
// We currently only have pc2dex entries.
for (size_t i = 0; i < pc2dex_entries; i++) {
struct PcInfo pc_info = pc_infos_.Get(i);
@@ -312,6 +317,9 @@ void CodeGenerator::BuildMappingTable(std::vector<uint8_t>* data) const {
pc2dex_data_size += SignedLeb128Size(pc_info.dex_pc - pc2dex_dalvik_offset);
pc2dex_offset = pc_info.native_pc;
pc2dex_dalvik_offset = pc_info.dex_pc;
+ if (src_map != nullptr) {
+ src_map->push_back(SrcMapElem({pc2dex_offset, pc2dex_dalvik_offset}));
+ }
}
uint32_t total_entries = pc2dex_entries + dex2pc_entries;
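The SrcMap built here is just an ordered list of (native pc, dex pc) pairs; FillInCFIInformation later walks it and feeds each pair to the DWARF line-table generator. The shape involved, as a sketch (SrcMapElem's from_/to_ field names are taken from their uses elsewhere in this patch):

    SrcMap src_map;
    src_map.push_back(SrcMapElem({0x00, 1}));  // native pc 0x00 -> dex pc 1
    src_map.push_back(SrcMapElem({0x10, 5}));  // native pc 0x10 -> dex pc 5
    for (const SrcMapElem& elem : src_map) {
      line_table_generator.PutDelta(elem.from_, elem.to_);
    }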
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index b31c3a3e83..12337c93de 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -32,6 +32,7 @@ static size_t constexpr kUninitializedFrameSize = 0;
class CodeGenerator;
class DexCompilationUnit;
+class SrcMap;
class CodeAllocator {
public:
@@ -126,7 +127,7 @@ class CodeGenerator : public ArenaObject {
void GenerateSlowPaths();
- void BuildMappingTable(std::vector<uint8_t>* vector) const;
+ void BuildMappingTable(std::vector<uint8_t>* vector, SrcMap* src_map) const;
void BuildVMapTable(std::vector<uint8_t>* vector) const;
void BuildNativeGCMap(
std::vector<uint8_t>* vector, const DexCompilationUnit& dex_compilation_unit) const;
@@ -142,6 +143,7 @@ class CodeGenerator : public ArenaObject {
protected:
CodeGenerator(HGraph* graph, size_t number_of_registers)
: frame_size_(kUninitializedFrameSize),
+ core_spill_mask_(0),
graph_(graph),
block_labels_(graph->GetArena(), 0),
pc_infos_(graph->GetArena(), 32),
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 8a5077b962..fce6ab0a33 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -161,7 +161,10 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
}
std::vector<uint8_t> mapping_table;
- codegen->BuildMappingTable(&mapping_table);
+ SrcMap src_mapping_table;
+ codegen->BuildMappingTable(&mapping_table,
+ GetCompilerDriver()->GetCompilerOptions().GetIncludeDebugSymbols() ?
+ &src_mapping_table : nullptr);
std::vector<uint8_t> vmap_table;
codegen->BuildVMapTable(&vmap_table);
std::vector<uint8_t> gc_map;
@@ -173,6 +176,7 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
codegen->GetFrameSize(),
codegen->GetCoreSpillMask(),
0, /* FPR spill mask, unused */
+ &src_mapping_table,
mapping_table,
vmap_table,
gc_map,
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index bd3a7d9767..da13b1ec55 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -16,6 +16,7 @@
#include "register_allocator.h"
+#include "base/bit_vector-inl.h"
#include "code_generator.h"
#include "ssa_liveness_analysis.h"
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index fbdc0b9593..5de1ab9c31 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -16,6 +16,7 @@
#include "ssa_liveness_analysis.h"
+#include "base/bit_vector-inl.h"
#include "code_generator.h"
#include "nodes.h"
diff --git a/compiler/utils/arena_bit_vector.cc b/compiler/utils/arena_bit_vector.cc
index 39f7d185a6..de35f3d197 100644
--- a/compiler/utils/arena_bit_vector.cc
+++ b/compiler/utils/arena_bit_vector.cc
@@ -16,11 +16,12 @@
#include "arena_allocator.h"
#include "arena_bit_vector.h"
+#include "base/allocator.h"
namespace art {
template <typename ArenaAlloc>
-class ArenaBitVectorAllocator : public Allocator {
+class ArenaBitVectorAllocator FINAL : public Allocator {
public:
explicit ArenaBitVectorAllocator(ArenaAlloc* arena) : arena_(arena) {}
~ArenaBitVectorAllocator() {}
@@ -37,7 +38,7 @@ class ArenaBitVectorAllocator : public Allocator {
static void operator delete(void* p) {} // Nop.
private:
- ArenaAlloc* arena_;
+ ArenaAlloc* const arena_;
DISALLOW_COPY_AND_ASSIGN(ArenaBitVectorAllocator);
};
diff --git a/compiler/utils/arena_bit_vector.h b/compiler/utils/arena_bit_vector.h
index 485ed76d12..c92658f7d6 100644
--- a/compiler/utils/arena_bit_vector.h
+++ b/compiler/utils/arena_bit_vector.h
@@ -51,23 +51,23 @@ std::ostream& operator<<(std::ostream& os, const OatBitMapKind& kind);
* A BitVector implementation that uses Arena allocation.
*/
class ArenaBitVector : public BitVector {
- public:
- ArenaBitVector(ArenaAllocator* arena, uint32_t start_bits, bool expandable,
- OatBitMapKind kind = kBitMapMisc);
- ArenaBitVector(ScopedArenaAllocator* arena, uint32_t start_bits, bool expandable,
- OatBitMapKind kind = kBitMapMisc);
- ~ArenaBitVector() {}
+ public:
+ ArenaBitVector(ArenaAllocator* arena, uint32_t start_bits, bool expandable,
+ OatBitMapKind kind = kBitMapMisc);
+ ArenaBitVector(ScopedArenaAllocator* arena, uint32_t start_bits, bool expandable,
+ OatBitMapKind kind = kBitMapMisc);
+ ~ArenaBitVector() {}
static void* operator new(size_t size, ArenaAllocator* arena) {
- return arena->Alloc(sizeof(ArenaBitVector), kArenaAllocGrowableBitMap);
+ return arena->Alloc(sizeof(ArenaBitVector), kArenaAllocGrowableBitMap);
}
static void* operator new(size_t size, ScopedArenaAllocator* arena) {
- return arena->Alloc(sizeof(ArenaBitVector), kArenaAllocGrowableBitMap);
+ return arena->Alloc(sizeof(ArenaBitVector), kArenaAllocGrowableBitMap);
}
static void operator delete(void* p) {} // Nop.
- private:
- const OatBitMapKind kind_; // for memory use tuning. TODO: currently unused.
+ private:
+ const OatBitMapKind kind_; // for memory use tuning. TODO: currently unused.
};
diff --git a/compiler/utils/dwarf_cfi.cc b/compiler/utils/dwarf_cfi.cc
index b3d1a47b80..83e5f5ad39 100644
--- a/compiler/utils/dwarf_cfi.cc
+++ b/compiler/utils/dwarf_cfi.cc
@@ -65,44 +65,86 @@ void DW_CFA_restore_state(std::vector<uint8_t>* buf) {
buf->push_back(0x0b);
}
-void WriteFDEHeader(std::vector<uint8_t>* buf) {
+void WriteFDEHeader(std::vector<uint8_t>* buf, bool is_64bit) {
// 'length' (filled in by other functions).
- PushWord(buf, 0);
+ if (is_64bit) {
+ PushWord(buf, 0xffffffff); // Escape value: a 64-bit DWARF length follows.
+ PushWord(buf, 0);
+ PushWord(buf, 0);
+ } else {
+ PushWord(buf, 0);
+ }
// 'CIE_pointer' (filled in by linker).
- PushWord(buf, 0);
+ if (is_64bit) {
+ PushWord(buf, 0);
+ PushWord(buf, 0);
+ } else {
+ PushWord(buf, 0);
+ }
// 'initial_location' (filled in by linker).
- PushWord(buf, 0);
+ if (is_64bit) {
+ PushWord(buf, 0);
+ PushWord(buf, 0);
+ } else {
+ PushWord(buf, 0);
+ }
// 'address_range' (filled in by other functions).
- PushWord(buf, 0);
+ if (is_64bit) {
+ PushWord(buf, 0);
+ PushWord(buf, 0);
+ } else {
+ PushWord(buf, 0);
+ }
// Augmentation length: 0
buf->push_back(0);
}
-void WriteFDEAddressRange(std::vector<uint8_t>* buf, uint32_t data) {
- const int kOffsetOfAddressRange = 12;
- CHECK(buf->size() >= kOffsetOfAddressRange + sizeof(uint32_t));
+void WriteFDEAddressRange(std::vector<uint8_t>* buf, uint64_t data, bool is_64bit) {
+ const size_t kOffsetOfAddressRange = is_64bit ? 28 : 12;
+ CHECK(buf->size() >= kOffsetOfAddressRange + (is_64bit ? 8 : 4));
uint8_t *p = buf->data() + kOffsetOfAddressRange;
- p[0] = data;
- p[1] = data >> 8;
- p[2] = data >> 16;
- p[3] = data >> 24;
+ if (is_64bit) {
+ p[0] = data;
+ p[1] = data >> 8;
+ p[2] = data >> 16;
+ p[3] = data >> 24;
+ p[4] = data >> 32;
+ p[5] = data >> 40;
+ p[6] = data >> 48;
+ p[7] = data >> 56;
+ } else {
+ p[0] = data;
+ p[1] = data >> 8;
+ p[2] = data >> 16;
+ p[3] = data >> 24;
+ }
}
-void WriteCFILength(std::vector<uint8_t>* buf) {
- uint32_t length = buf->size() - 4;
+void WriteCFILength(std::vector<uint8_t>* buf, bool is_64bit) {
+ uint64_t length = is_64bit ? buf->size() - 12 : buf->size() - 4;
DCHECK_EQ((length & 0x3), 0U);
- DCHECK_GT(length, 4U);
- uint8_t *p = buf->data();
- p[0] = length;
- p[1] = length >> 8;
- p[2] = length >> 16;
- p[3] = length >> 24;
+ uint8_t* p = is_64bit ? buf->data() + 4 : buf->data();
+ if (is_64bit) {
+ p[0] = length;
+ p[1] = length >> 8;
+ p[2] = length >> 16;
+ p[3] = length >> 24;
+ p[4] = length >> 32;
+ p[5] = length >> 40;
+ p[6] = length >> 48;
+ p[7] = length >> 56;
+ } else {
+ p[0] = length;
+ p[1] = length >> 8;
+ p[2] = length >> 16;
+ p[3] = length >> 24;
+ }
}
void PadCFI(std::vector<uint8_t>* buf) {
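The is_64bit branches above implement DWARF's initial-length escape: a 32-bit unit starts with a 4-byte length, a 64-bit unit with 0xffffffff followed by an 8-byte length, and in both forms the length counts only the bytes after the length field itself, which is why WriteCFILength subtracts 4 or 12:

    // DWARF32: [length:4]                -> length = buf->size() - 4
    // DWARF64: [0xffffffff:4][length:8]  -> length = buf->size() - 12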
diff --git a/compiler/utils/dwarf_cfi.h b/compiler/utils/dwarf_cfi.h
index e5acc0eb3a..0c8b1516dd 100644
--- a/compiler/utils/dwarf_cfi.h
+++ b/compiler/utils/dwarf_cfi.h
@@ -66,20 +66,24 @@ void DW_CFA_restore_state(std::vector<uint8_t>* buf);
/**
* @brief Write FDE header into an FDE buffer
* @param buf FDE buffer.
+ * @param is_64bit Whether the FDE describes a 64-bit application.
*/
-void WriteFDEHeader(std::vector<uint8_t>* buf);
+void WriteFDEHeader(std::vector<uint8_t>* buf, bool is_64bit);
/**
* @brief Set 'address_range' field of an FDE buffer
* @param buf FDE buffer.
+ * @param data The address range value to store.
+ * @param is_64bit Whether the FDE describes a 64-bit application.
*/
-void WriteFDEAddressRange(std::vector<uint8_t>* buf, uint32_t data);
+void WriteFDEAddressRange(std::vector<uint8_t>* buf, uint64_t data, bool is_64bit);
/**
* @brief Set 'length' field of an FDE buffer
* @param buf FDE buffer.
+ * @param is_64bit Whether the FDE describes a 64-bit application.
*/
-void WriteCFILength(std::vector<uint8_t>* buf);
+void WriteCFILength(std::vector<uint8_t>* buf, bool is_64bit);
/**
* @brief Pad an FDE buffer with 0 until its size is a multiple of 4
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 48edb157f6..2c9bc28923 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -1409,13 +1409,13 @@ void X86Assembler::EmitGenericShift(int reg_or_opcode,
}
void X86Assembler::InitializeFrameDescriptionEntry() {
- WriteFDEHeader(&cfi_info_);
+ WriteFDEHeader(&cfi_info_, false /* is_64bit */);
}
void X86Assembler::FinalizeFrameDescriptionEntry() {
- WriteFDEAddressRange(&cfi_info_, buffer_.Size());
+ WriteFDEAddressRange(&cfi_info_, buffer_.Size(), false /* is_64bit */);
PadCFI(&cfi_info_);
- WriteCFILength(&cfi_info_);
+ WriteCFILength(&cfi_info_, false /* is_64bit */);
}
constexpr size_t kFramePointerSize = 4;
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 62b72c234a..1e2884a88c 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -1716,13 +1716,13 @@ void X86_64Assembler::EmitOptionalByteRegNormalizingRex32(CpuRegister dst, const
}
void X86_64Assembler::InitializeFrameDescriptionEntry() {
- WriteFDEHeader(&cfi_info_);
+ WriteFDEHeader(&cfi_info_, true /* is_64bit */);
}
void X86_64Assembler::FinalizeFrameDescriptionEntry() {
- WriteFDEAddressRange(&cfi_info_, buffer_.Size());
+ WriteFDEAddressRange(&cfi_info_, buffer_.Size(), true /* is_64bit */);
PadCFI(&cfi_info_);
- WriteCFILength(&cfi_info_);
+ WriteCFILength(&cfi_info_, true /* is_64bit */);
}
constexpr size_t kFramePointerSize = 8;
diff --git a/disassembler/disassembler_x86.cc b/disassembler/disassembler_x86.cc
index 0ca8962282..0bf758efb9 100644
--- a/disassembler/disassembler_x86.cc
+++ b/disassembler/disassembler_x86.cc
@@ -702,12 +702,24 @@ DISASSEMBLER_ENTRY(cmp,
load = true;
immediate_bytes = 1;
break;
+ case 0xA5:
+ opcode << "shld";
+ has_modrm = true;
+ load = true;
+ cx = true;
+ break;
case 0xAC:
opcode << "shrd";
has_modrm = true;
load = true;
immediate_bytes = 1;
break;
+ case 0xAD:
+ opcode << "shrd";
+ has_modrm = true;
+ load = true;
+ cx = true;
+ break;
case 0xAE:
if (prefix[0] == 0xF3) {
prefix[0] = 0; // clear prefix now it's served its purpose as part of the opcode
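Opcodes 0F A5 and 0F AD are the CL-count forms of shld/shrd; their 0F A4 / 0F AC siblings, already handled above, take an immediate shift count instead, hence cx = true rather than immediate_bytes = 1. Sample encodings (illustrative; exact formatting is up to the disassembler):

    0f a5 d8    shld %cl, %ebx, %eax
    0f ad d8    shrd %cl, %ebx, %eax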
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index eed20da178..bbdf3a3e5c 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -569,6 +569,11 @@ bool PatchOat::PatchElf() {
}
}
+ t.NewTiming("Fixup Debug Sections");
+ if (!oat_file_->FixupDebugSections(delta_)) {
+ return false;
+ }
+
return true;
}
@@ -818,22 +823,12 @@ static int patchoat(int argc, char **argv) {
if (log_options) {
LOG(INFO) << "patchoat: option[" << i << "]=" << argv[i];
}
- // TODO: GetInstructionSetFromString shouldn't LOG(FATAL).
if (option.starts_with("--instruction-set=")) {
isa_set = true;
const char* isa_str = option.substr(strlen("--instruction-set=")).data();
- if (!strcmp("arm", isa_str)) {
- isa = kArm;
- } else if (!strcmp("arm64", isa_str)) {
- isa = kArm64;
- } else if (!strcmp("x86", isa_str)) {
- isa = kX86;
- } else if (!strcmp("x86_64", isa_str)) {
- isa = kX86_64;
- } else if (!strcmp("mips", isa_str)) {
- isa = kMips;
- } else {
- Usage("Unknown instruction set %s", isa_str);
+ isa = GetInstructionSetFromString(isa_str);
+ if (isa == kNone) {
+ Usage("Unknown or invalid instruction set %s", isa_str);
}
} else if (option.starts_with("--input-oat-location=")) {
if (have_input_oat) {
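The strcmp chain is collapsed into the shared parser; the replacement relies on GetInstructionSetFromString() returning kNone for unrecognized names instead of aborting. Usage, as above:

    InstructionSet isa = GetInstructionSetFromString("arm64");  // -> kArm64
    if (GetInstructionSetFromString("bogus") == kNone) {
      // Unknown or invalid name: the caller decides how to fail, here Usage().
    }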
diff --git a/runtime/Android.mk b/runtime/Android.mk
index f55d3fbe12..1f2f86ec9f 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -106,7 +106,7 @@ LIBART_COMMON_SRC_FILES := \
mirror/string.cc \
mirror/throwable.cc \
monitor.cc \
- native_bridge.cc \
+ native_bridge_art_interface.cc \
native/dalvik_system_DexFile.cc \
native/dalvik_system_VMDebug.cc \
native/dalvik_system_VMRuntime.cc \
@@ -417,7 +417,7 @@ $$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PAT
LOCAL_C_INCLUDES += $$(ART_C_INCLUDES)
LOCAL_C_INCLUDES += art/sigchainlib
- LOCAL_SHARED_LIBRARIES += liblog libnativehelper
+ LOCAL_SHARED_LIBRARIES += liblog libnativehelper libnativebridge
include external/libcxx/libcxx.mk
LOCAL_SHARED_LIBRARIES += libbacktrace_libc++
ifeq ($$(art_target_or_host),target)
diff --git a/runtime/arch/arm/arm_sdiv.S b/runtime/arch/arm/arm_sdiv.S
index 925e428444..babdbf5526 100644
--- a/runtime/arch/arm/arm_sdiv.S
+++ b/runtime/arch/arm/arm_sdiv.S
@@ -9,7 +9,7 @@
#include "asm_support_arm.S"
.section .text
-ENTRY CheckForARMSDIVInstruction
+ENTRY_NO_HIDE CheckForARMSDIVInstruction
mov r1,#1
// depending on the architecture, the assembler will not allow an
// sdiv instruction, so we will have to output the bytes directly.
diff --git a/runtime/arch/arm/asm_support_arm.S b/runtime/arch/arm/asm_support_arm.S
index e1b0ce7e17..a3e3b21c7e 100644
--- a/runtime/arch/arm/asm_support_arm.S
+++ b/runtime/arch/arm/asm_support_arm.S
@@ -33,6 +33,7 @@
.macro ENTRY name
.thumb_func
.type \name, #function
+ .hidden \name // Hide this as a global symbol, so we do not incur plt calls.
.global \name
/* Cache alignment for function entry */
.balign 16
@@ -41,9 +42,35 @@
.fnstart
.endm
+.macro ENTRY_NO_HIDE name
+ .thumb_func
+ .type \name, #function
+ .global \name
+ /* Cache alignment for function entry */
+ .balign 16
+\name:
+ .cfi_startproc
+ .fnstart
+.endm
+
+
.macro ARM_ENTRY name
.arm
.type \name, #function
+ .hidden \name // Hide this as a global symbol, so we do not incur plt calls.
+ .global \name
+ /* Cache alignment for function entry */
+ .balign 16
+\name:
+ .cfi_startproc
+ /* Ensure we get a sane starting CFA. */
+ .cfi_def_cfa sp,0
+ .fnstart
+.endm
+
+.macro ARM_ENTRY_NO_HIDE name
+ .arm
+ .type \name, #function
.global \name
/* Cache alignment for function entry */
.balign 16
diff --git a/runtime/arch/arm/portable_entrypoints_arm.S b/runtime/arch/arm/portable_entrypoints_arm.S
index 98d17dc830..3491c18c37 100644
--- a/runtime/arch/arm/portable_entrypoints_arm.S
+++ b/runtime/arch/arm/portable_entrypoints_arm.S
@@ -138,7 +138,7 @@ ENTRY art_portable_resolution_trampoline
END art_portable_resolution_trampoline
.extern artPortableToInterpreterBridge
-ENTRY art_portable_to_interpreter_bridge
+ENTRY_NO_HIDE art_portable_to_interpreter_bridge
@ Fake callee save ref and args frame set up, note portable doesn't use callee save frames.
@ TODO: just save the registers that are needed in artPortableToInterpreterBridge.
push {r1-r3, r5-r8, r10-r11, lr} @ 10 words of callee saves
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index dd1f04a9b4..1b30c9cca2 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -308,9 +308,12 @@ ENTRY art_quick_invoke_stub
#ifdef ARM_R4_SUSPEND_FLAG
mov r4, #SUSPEND_CHECK_INTERVAL @ reset r4 to suspend check interval
#endif
- add r5, r2, #16 @ create space for method pointer in frame
- and r5, #0xFFFFFFF0 @ align frame size to 16 bytes
- sub sp, r5 @ reserve stack space for argument array
+ add r5, r2, #4 @ create space for method pointer in frame
+
+ sub r5, sp, r5 @ reserve & align *stack* to 16 bytes: native calling
+ and r5, #0xFFFFFFF0 @ convention only aligns to 8B, so we have to ensure ART
+ mov sp, r5 @ 16B alignment ourselves.
+
add r0, sp, #4 @ pass stack pointer + method ptr as dest for memcpy
bl memcpy @ memcpy (dest, src, bytes)
ldr r0, [r11] @ restore method*
@@ -988,7 +991,7 @@ END art_quick_resolution_trampoline
/*
* Called to do a generic JNI down-call
*/
-ENTRY art_quick_generic_jni_trampoline
+ENTRY_NO_HIDE art_quick_generic_jni_trampoline
SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
str r0, [sp, #0] // Store native ArtMethod* to bottom of stack.
@@ -1034,14 +1037,13 @@ ENTRY art_quick_generic_jni_trampoline
// result sign extension is handled in C code
// prepare for artQuickGenericJniEndTrampoline call
// (Thread*, result, result_f)
- // r0 r1,r2 r3,stack <= C calling convention
+ // r0 r2,r3 stack <= C calling convention
// r11 r0,r1 r0,r1 <= where they are
- sub sp, sp, #12 // Stack alignment.
+ sub sp, sp, #8 // Stack alignment.
- push {r1}
- mov r3, r0
- mov r2, r1
- mov r1, r0
+ push {r0-r1}
+ mov r3, r1
+ mov r2, r0
mov r0, r11
blx artQuickGenericJniEndTrampoline
@@ -1058,7 +1060,18 @@ ENTRY art_quick_generic_jni_trampoline
cbnz r2, .Lexception_in_native
// Tear down the callee-save frame.
- RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ add sp, #12 @ rewind sp
+ // Do not pop r0 and r1, they contain the return value.
+ pop {r2-r3, r5-r8, r10-r11, lr} @ 9 words of callee saves
+ .cfi_restore r2
+ .cfi_restore r3
+ .cfi_restore r5
+ .cfi_restore r6
+ .cfi_restore r7
+ .cfi_restore r8
+ .cfi_restore r10
+ .cfi_restore r11
+ .cfi_adjust_cfa_offset -48
bx lr // ret
@@ -1073,7 +1086,7 @@ ENTRY art_quick_generic_jni_trampoline
END art_quick_generic_jni_trampoline
.extern artQuickToInterpreterBridge
-ENTRY art_quick_to_interpreter_bridge
+ENTRY_NO_HIDE art_quick_to_interpreter_bridge
SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
mov r1, r9 @ pass Thread::Current
mov r2, sp @ pass SP
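The invoke-stub change earlier in this file deserves a worked example. The old prologue rounded the frame size to 16 bytes (add r5, r2, #16; and r5, #0xFFFFFFF0; sub sp, r5), which keeps sp 16-byte aligned only if it already was, and AAPCS guarantees just 8 at a public interface. The new code aligns the resulting sp directly; in C++ terms (a sketch, not runtime code):

    // args_bytes corresponds to r2; the +4 reserves the method* slot.
    uintptr_t AlignedSp(uintptr_t sp, uint32_t args_bytes) {
      return (sp - (args_bytes + 4)) & ~uintptr_t{0xF};
    }
    // e.g. sp = 0x...E8 (8-byte aligned), args_bytes = 20:
    //   old: sp -= ((20 + 16) & ~0xF) = 32 -> 0x...C8, still only 8B-aligned
    //   new: (0x...E8 - 24) & ~0xF         -> 0x...D0, 16B-aligned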
diff --git a/runtime/arch/arm64/asm_support_arm64.S b/runtime/arch/arm64/asm_support_arm64.S
index be167faae6..fb49460364 100644
--- a/runtime/arch/arm64/asm_support_arm64.S
+++ b/runtime/arch/arm64/asm_support_arm64.S
@@ -44,6 +44,16 @@
.macro ENTRY name
.type \name, #function
+ .hidden \name // Hide this as a global symbol, so we do not incur plt calls.
+ .global \name
+ /* Cache alignment for function entry */
+ .balign 16
+\name:
+ .cfi_startproc
+.endm
+
+.macro ENTRY_NO_HIDE name
+ .type \name, #function
.global \name
/* Cache alignment for function entry */
.balign 16
@@ -62,4 +72,10 @@
END \name
.endm
+.macro UNIMPLEMENTED_NO_HIDE name
+ ENTRY_NO_HIDE \name
+ brk 0
+ END \name
+.endm
+
#endif // ART_RUNTIME_ARCH_ARM64_ASM_SUPPORT_ARM64_S_
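The pattern across these assembly changes: ENTRY now marks entry points .hidden, so same-DSO calls bind directly rather than through the PLT, while the *_NO_HIDE variants keep default visibility, presumably because a few symbols, such as the interpreter bridges and the generic JNI trampoline, must remain externally resolvable. Usage mirrors the existing macros:

    ENTRY art_quick_lock_object                      // hidden: direct, PLT-free calls
        ...
    END art_quick_lock_object

    ENTRY_NO_HIDE art_quick_to_interpreter_bridge    // keeps default visibility
        ...
    END art_quick_to_interpreter_bridge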
diff --git a/runtime/arch/arm64/portable_entrypoints_arm64.S b/runtime/arch/arm64/portable_entrypoints_arm64.S
index e136885c7e..41711b5c66 100644
--- a/runtime/arch/arm64/portable_entrypoints_arm64.S
+++ b/runtime/arch/arm64/portable_entrypoints_arm64.S
@@ -25,4 +25,4 @@ UNIMPLEMENTED art_portable_proxy_invoke_handler
UNIMPLEMENTED art_portable_resolution_trampoline
-UNIMPLEMENTED art_portable_to_interpreter_bridge
+UNIMPLEMENTED_NO_HIDE art_portable_to_interpreter_bridge
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index ab9035ae45..2a19e27b04 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1435,7 +1435,7 @@ END art_quick_resolution_trampoline
/*
* Called to do a generic JNI down-call
*/
-ENTRY art_quick_generic_jni_trampoline
+ENTRY_NO_HIDE art_quick_generic_jni_trampoline
SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
str x0, [sp, #0] // Store native ArtMethod* to bottom of stack.
@@ -1531,7 +1531,7 @@ END art_quick_generic_jni_trampoline
* x0 = method being called/to bridge to.
* x1..x7, d0..d7 = arguments to that method.
*/
-ENTRY art_quick_to_interpreter_bridge
+ENTRY_NO_HIDE art_quick_to_interpreter_bridge
SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME // Set up frame and save arguments.
// x0 will contain mirror::ArtMethod* method.
diff --git a/runtime/arch/memcmp16.cc b/runtime/arch/memcmp16.cc
index 7928085221..5a3e73eebc 100644
--- a/runtime/arch/memcmp16.cc
+++ b/runtime/arch/memcmp16.cc
@@ -28,4 +28,16 @@ int32_t memcmp16_generic_static(const uint16_t* s0, const uint16_t* s1, size_t c
return 0;
}
+namespace art {
+
+namespace testing {
+
+int32_t MemCmp16Testing(const uint16_t* s0, const uint16_t* s1, size_t count) {
+ return MemCmp16(s0, s1, count);
+}
+
+} // namespace testing
+
+} // namespace art
+
#pragma GCC diagnostic warning "-Wunused-function"
diff --git a/runtime/arch/memcmp16.h b/runtime/arch/memcmp16.h
index 14dc1e3880..4b9fb8eff6 100644
--- a/runtime/arch/memcmp16.h
+++ b/runtime/arch/memcmp16.h
@@ -50,4 +50,17 @@ static inline int32_t MemCmp16(const uint16_t* s0, const uint16_t* s1, size_t co
extern "C" int32_t memcmp16_generic_static(const uint16_t* s0, const uint16_t* s1, size_t count);
#endif
+namespace art {
+
+namespace testing {
+
+// A version that is exposed and relatively "close to the metal," so that memcmp16_test can do
+// some reasonable testing. Without this, as __memcmp16 is hidden, the test cannot access the
+// implementation.
+int32_t MemCmp16Testing(const uint16_t* s0, const uint16_t* s1, size_t count);
+
+} // namespace testing
+
+} // namespace art
+
#endif // ART_RUNTIME_ARCH_MEMCMP16_H_
diff --git a/runtime/arch/memcmp16_test.cc b/runtime/arch/memcmp16_test.cc
index 5747c67ea3..5ba06f82a2 100644
--- a/runtime/arch/memcmp16_test.cc
+++ b/runtime/arch/memcmp16_test.cc
@@ -139,7 +139,7 @@ static void CheckSeparate(size_t max_length, size_t min_length) {
size_t mod_min = c1_mod < c2_mod ? c1_mod : c2_mod;
int32_t expected = memcmp16_compare(s1_pot_unaligned, s2_pot_unaligned, mod_min);
- int32_t computed = MemCmp16(s1_pot_unaligned, s2_pot_unaligned, mod_min);
+ int32_t computed = art::testing::MemCmp16Testing(s1_pot_unaligned, s2_pot_unaligned, mod_min);
ASSERT_EQ(expected, computed) << "Run " << round << ", c1=" << count1 << " c2=" << count2;
diff --git a/runtime/arch/mips/asm_support_mips.h b/runtime/arch/mips/asm_support_mips.h
index 4db5ea6033..6add93b40e 100644
--- a/runtime/arch/mips/asm_support_mips.h
+++ b/runtime/arch/mips/asm_support_mips.h
@@ -22,9 +22,9 @@
// Offset of field Thread::tls32_.state_and_flags verified in InitCpu
#define THREAD_FLAGS_OFFSET 0
// Offset of field Thread::tlsPtr_.card_table verified in InitCpu
-#define THREAD_CARD_TABLE_OFFSET 112
+#define THREAD_CARD_TABLE_OFFSET 120
// Offset of field Thread::tlsPtr_.exception verified in InitCpu
-#define THREAD_EXCEPTION_OFFSET 116
+#define THREAD_EXCEPTION_OFFSET 124
#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVE 64
#define FRAME_SIZE_REFS_ONLY_CALLEE_SAVE 64
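[Editor's note: these hand-written constants must track the C++ layout of Thread::tlsPtr_, which is why both offsets move by 8 together; ART verifies them at runtime in InitCpu. A hedged sketch of checking such constants at compile time, with an illustrative struct rather than ART's real Thread:]

    #include <cstddef>
    #include <cstdint>

    struct ToyThread {
      uint32_t state_and_flags;  // offset 0, cf. THREAD_FLAGS_OFFSET
      char other_tls[116];       // stand-in for intervening tls fields
      void* card_table;          // lands at offset 120, cf. the new constant
    };

    static_assert(offsetof(ToyThread, state_and_flags) == 0,
                  "flags offset drifted");
    static_assert(offsetof(ToyThread, card_table) == 120,
                  "card table offset drifted");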
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 25f9a5a251..864e3f7ad0 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -17,6 +17,7 @@
#include <cstdio>
#include "common_runtime_test.h"
+#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
@@ -543,15 +544,21 @@ class StubTest : public CommonRuntimeTest {
#endif
}
+ static uintptr_t GetEntrypoint(Thread* self, QuickEntrypointEnum entrypoint) {
+ int32_t offset;
+#ifdef __LP64__
+ offset = GetThreadOffset<8>(entrypoint).Int32Value();
+#else
+ offset = GetThreadOffset<4>(entrypoint).Int32Value();
+#endif
+ return *reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(self) + offset);
+ }
+
protected:
size_t fp_result;
};
-#if defined(__i386__) || (defined(__x86_64__) && !defined(__APPLE__))
-extern "C" void art_quick_memcpy(void);
-#endif
-
TEST_F(StubTest, Memcpy) {
#if defined(__i386__) || (defined(__x86_64__) && !defined(__APPLE__))
Thread* self = Thread::Current();
@@ -564,7 +571,7 @@ TEST_F(StubTest, Memcpy) {
}
Invoke3(reinterpret_cast<size_t>(&trg[4]), reinterpret_cast<size_t>(&orig[4]),
- 10 * sizeof(uint32_t), reinterpret_cast<uintptr_t>(&art_quick_memcpy), self);
+ 10 * sizeof(uint32_t), StubTest::GetEntrypoint(self, kQuickMemcpy), self);
EXPECT_EQ(orig[0], trg[0]);
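[Editor's note: with the stubs now hidden, the extern "C" declarations the test used to link against stop resolving, so GetEntrypoint above reads the stub address out of the per-thread entrypoint table instead; GetThreadOffset<4>/<8> selects the layout for the pointer size. A toy model of the same lookup, with illustrative layout and names:]

    #include <cstdint>

    struct ToyThread {
      char other_tls[64];        // illustrative preamble
      uintptr_t entrypoints[8];  // illustrative quick-entrypoint table
    };

    uintptr_t EntrypointAt(ToyThread* self, int32_t offset) {
      // Same arithmetic as GetEntrypoint: a byte offset from the Thread base,
      // dereferenced as a pointer-sized slot.
      return *reinterpret_cast<uintptr_t*>(
          reinterpret_cast<uint8_t*>(self) + offset);
    }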
@@ -589,15 +596,14 @@ TEST_F(StubTest, Memcpy) {
#endif
}
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
-extern "C" void art_quick_lock_object(void);
-#endif
-
TEST_F(StubTest, LockObject) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
static constexpr size_t kThinLockLoops = 100;
Thread* self = Thread::Current();
+
+ const uintptr_t art_quick_lock_object = StubTest::GetEntrypoint(self, kQuickLockObject);
+
// Create an object
ScopedObjectAccess soa(self);
// garbage is created during ClassLinker::Init
@@ -609,8 +615,7 @@ TEST_F(StubTest, LockObject) {
LockWord::LockState old_state = lock.GetState();
EXPECT_EQ(LockWord::LockState::kUnlocked, old_state);
- Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U,
- reinterpret_cast<uintptr_t>(&art_quick_lock_object), self);
+ Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_lock_object, self);
LockWord lock_after = obj->GetLockWord(false);
LockWord::LockState new_state = lock_after.GetState();
@@ -618,8 +623,7 @@ TEST_F(StubTest, LockObject) {
EXPECT_EQ(lock_after.ThinLockCount(), 0U); // Thin lock starts count at zero
for (size_t i = 1; i < kThinLockLoops; ++i) {
- Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U,
- reinterpret_cast<uintptr_t>(&art_quick_lock_object), self);
+ Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_lock_object, self);
// Check we're at lock count i
@@ -635,8 +639,7 @@ TEST_F(StubTest, LockObject) {
obj2->IdentityHashCode();
- Invoke3(reinterpret_cast<size_t>(obj2.Get()), 0U, 0U,
- reinterpret_cast<uintptr_t>(&art_quick_lock_object), self);
+ Invoke3(reinterpret_cast<size_t>(obj2.Get()), 0U, 0U, art_quick_lock_object, self);
LockWord lock_after2 = obj2->GetLockWord(false);
LockWord::LockState new_state2 = lock_after2.GetState();
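[Editor's note: the assertions encode thin-lock bookkeeping — the first acquisition flips the word from Unlocked to ThinLocked with count 0, and each recursive acquisition bumps the count. A toy model of that invariant; this is not ART's LockWord bit encoding:]

    enum class LockState { kUnlocked, kThinLocked };

    struct ToyLockWord {
      LockState state = LockState::kUnlocked;
      unsigned thin_lock_count = 0;  // recursive acquisitions beyond the first

      void Lock() {
        if (state == LockState::kUnlocked) {
          state = LockState::kThinLocked;  // count starts at zero
        } else {
          ++thin_lock_count;
        }
      }
    };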
@@ -665,17 +668,15 @@ class RandGen {
};
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
-extern "C" void art_quick_lock_object(void);
-extern "C" void art_quick_unlock_object(void);
-#endif
-
// NO_THREAD_SAFETY_ANALYSIS as we do not want to grab exclusive mutator lock for MonitorInfo.
static void TestUnlockObject(StubTest* test) NO_THREAD_SAFETY_ANALYSIS {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
static constexpr size_t kThinLockLoops = 100;
Thread* self = Thread::Current();
+
+ const uintptr_t art_quick_lock_object = StubTest::GetEntrypoint(self, kQuickLockObject);
+ const uintptr_t art_quick_unlock_object = StubTest::GetEntrypoint(self, kQuickUnlockObject);
// Create an object
ScopedObjectAccess soa(self);
// garbage is created during ClassLinker::Init
@@ -687,8 +688,7 @@ static void TestUnlockObject(StubTest* test) NO_THREAD_SAFETY_ANALYSIS {
LockWord::LockState old_state = lock.GetState();
EXPECT_EQ(LockWord::LockState::kUnlocked, old_state);
- test->Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U,
- reinterpret_cast<uintptr_t>(&art_quick_unlock_object), self);
+ test->Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_unlock_object, self);
// This should be an illegal monitor state.
EXPECT_TRUE(self->IsExceptionPending());
self->ClearException();
@@ -697,15 +697,13 @@ static void TestUnlockObject(StubTest* test) NO_THREAD_SAFETY_ANALYSIS {
LockWord::LockState new_state = lock_after.GetState();
EXPECT_EQ(LockWord::LockState::kUnlocked, new_state);
- test->Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U,
- reinterpret_cast<uintptr_t>(&art_quick_lock_object), self);
+ test->Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_lock_object, self);
LockWord lock_after2 = obj->GetLockWord(false);
LockWord::LockState new_state2 = lock_after2.GetState();
EXPECT_EQ(LockWord::LockState::kThinLocked, new_state2);
- test->Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U,
- reinterpret_cast<uintptr_t>(&art_quick_unlock_object), self);
+ test->Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_unlock_object, self);
LockWord lock_after3 = obj->GetLockWord(false);
LockWord::LockState new_state3 = lock_after3.GetState();
@@ -759,12 +757,12 @@ static void TestUnlockObject(StubTest* test) NO_THREAD_SAFETY_ANALYSIS {
}
if (lock) {
- test->Invoke3(reinterpret_cast<size_t>(objects[index].Get()), 0U, 0U,
- reinterpret_cast<uintptr_t>(&art_quick_lock_object), self);
+ test->Invoke3(reinterpret_cast<size_t>(objects[index].Get()), 0U, 0U, art_quick_lock_object,
+ self);
counts[index]++;
} else {
test->Invoke3(reinterpret_cast<size_t>(objects[index].Get()), 0U, 0U,
- reinterpret_cast<uintptr_t>(&art_quick_unlock_object), self);
+ art_quick_unlock_object, self);
counts[index]--;
}
@@ -795,8 +793,8 @@ static void TestUnlockObject(StubTest* test) NO_THREAD_SAFETY_ANALYSIS {
size_t index = kNumberOfLocks - 1 - i;
size_t count = counts[index];
while (count > 0) {
- test->Invoke3(reinterpret_cast<size_t>(objects[index].Get()), 0U, 0U,
- reinterpret_cast<uintptr_t>(&art_quick_unlock_object), self);
+ test->Invoke3(reinterpret_cast<size_t>(objects[index].Get()), 0U, 0U, art_quick_unlock_object,
+ self);
count--;
}
@@ -825,6 +823,9 @@ extern "C" void art_quick_check_cast(void);
TEST_F(StubTest, CheckCast) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
Thread* self = Thread::Current();
+
+ const uintptr_t art_quick_check_cast = StubTest::GetEntrypoint(self, kQuickCheckCast);
+
// Find some classes.
ScopedObjectAccess soa(self);
// garbage is created during ClassLinker::Init
@@ -838,24 +839,24 @@ TEST_F(StubTest, CheckCast) {
EXPECT_FALSE(self->IsExceptionPending());
Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(c.Get()), 0U,
- reinterpret_cast<uintptr_t>(&art_quick_check_cast), self);
+ art_quick_check_cast, self);
EXPECT_FALSE(self->IsExceptionPending());
Invoke3(reinterpret_cast<size_t>(c2.Get()), reinterpret_cast<size_t>(c2.Get()), 0U,
- reinterpret_cast<uintptr_t>(&art_quick_check_cast), self);
+ art_quick_check_cast, self);
EXPECT_FALSE(self->IsExceptionPending());
Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(c2.Get()), 0U,
- reinterpret_cast<uintptr_t>(&art_quick_check_cast), self);
+ art_quick_check_cast, self);
EXPECT_FALSE(self->IsExceptionPending());
// TODO: Make the following work. But that would require correct managed frames.
Invoke3(reinterpret_cast<size_t>(c2.Get()), reinterpret_cast<size_t>(c.Get()), 0U,
- reinterpret_cast<uintptr_t>(&art_quick_check_cast), self);
+ art_quick_check_cast, self);
EXPECT_TRUE(self->IsExceptionPending());
self->ClearException();
@@ -868,16 +869,16 @@ TEST_F(StubTest, CheckCast) {
}
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
-extern "C" void art_quick_aput_obj_with_null_and_bound_check(void);
-// Do not check non-checked ones, we'd need handlers and stuff...
-#endif
-
TEST_F(StubTest, APutObj) {
TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
Thread* self = Thread::Current();
+
+ // Do not test the non-checking stubs; they would need fault handlers and more setup.
+ const uintptr_t art_quick_aput_obj_with_null_and_bound_check =
+ StubTest::GetEntrypoint(self, kQuickAputObjectWithNullAndBoundCheck);
+
// Create an object
ScopedObjectAccess soa(self);
// garbage is created during ClassLinker::Init
@@ -907,25 +908,25 @@ TEST_F(StubTest, APutObj) {
EXPECT_FALSE(self->IsExceptionPending());
Invoke3(reinterpret_cast<size_t>(array.Get()), 0U, reinterpret_cast<size_t>(str_obj.Get()),
- reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
+ art_quick_aput_obj_with_null_and_bound_check, self);
EXPECT_FALSE(self->IsExceptionPending());
EXPECT_EQ(str_obj.Get(), array->Get(0));
Invoke3(reinterpret_cast<size_t>(array.Get()), 1U, reinterpret_cast<size_t>(str_obj.Get()),
- reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
+ art_quick_aput_obj_with_null_and_bound_check, self);
EXPECT_FALSE(self->IsExceptionPending());
EXPECT_EQ(str_obj.Get(), array->Get(1));
Invoke3(reinterpret_cast<size_t>(array.Get()), 2U, reinterpret_cast<size_t>(str_obj.Get()),
- reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
+ art_quick_aput_obj_with_null_and_bound_check, self);
EXPECT_FALSE(self->IsExceptionPending());
EXPECT_EQ(str_obj.Get(), array->Get(2));
Invoke3(reinterpret_cast<size_t>(array.Get()), 3U, reinterpret_cast<size_t>(str_obj.Get()),
- reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
+ art_quick_aput_obj_with_null_and_bound_check, self);
EXPECT_FALSE(self->IsExceptionPending());
EXPECT_EQ(str_obj.Get(), array->Get(3));
@@ -933,25 +934,25 @@ TEST_F(StubTest, APutObj) {
// 1.2) Assign null to array[0..3]
Invoke3(reinterpret_cast<size_t>(array.Get()), 0U, reinterpret_cast<size_t>(nullptr),
- reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
+ art_quick_aput_obj_with_null_and_bound_check, self);
EXPECT_FALSE(self->IsExceptionPending());
EXPECT_EQ(nullptr, array->Get(0));
Invoke3(reinterpret_cast<size_t>(array.Get()), 1U, reinterpret_cast<size_t>(nullptr),
- reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
+ art_quick_aput_obj_with_null_and_bound_check, self);
EXPECT_FALSE(self->IsExceptionPending());
EXPECT_EQ(nullptr, array->Get(1));
Invoke3(reinterpret_cast<size_t>(array.Get()), 2U, reinterpret_cast<size_t>(nullptr),
- reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
+ art_quick_aput_obj_with_null_and_bound_check, self);
EXPECT_FALSE(self->IsExceptionPending());
EXPECT_EQ(nullptr, array->Get(2));
Invoke3(reinterpret_cast<size_t>(array.Get()), 3U, reinterpret_cast<size_t>(nullptr),
- reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
+ art_quick_aput_obj_with_null_and_bound_check, self);
EXPECT_FALSE(self->IsExceptionPending());
EXPECT_EQ(nullptr, array->Get(3));
@@ -972,7 +973,7 @@ TEST_F(StubTest, APutObj) {
Invoke3(reinterpret_cast<size_t>(array.Get()), static_cast<size_t>(-1),
reinterpret_cast<size_t>(str_obj.Get()),
- reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
+ art_quick_aput_obj_with_null_and_bound_check, self);
EXPECT_TRUE(self->IsExceptionPending());
self->ClearException();
@@ -980,7 +981,7 @@ TEST_F(StubTest, APutObj) {
// 2.3) Index > 0
Invoke3(reinterpret_cast<size_t>(array.Get()), 10U, reinterpret_cast<size_t>(str_obj.Get()),
- reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
+ art_quick_aput_obj_with_null_and_bound_check, self);
EXPECT_TRUE(self->IsExceptionPending());
self->ClearException();
@@ -988,7 +989,7 @@ TEST_F(StubTest, APutObj) {
// 3) Failure cases (obj into str[])
Invoke3(reinterpret_cast<size_t>(array.Get()), 0U, reinterpret_cast<size_t>(obj_obj.Get()),
- reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
+ art_quick_aput_obj_with_null_and_bound_check, self);
EXPECT_TRUE(self->IsExceptionPending());
self->ClearException();
@@ -1024,7 +1025,7 @@ TEST_F(StubTest, AllocObject) {
size_t result = Invoke3(static_cast<size_t>(c->GetDexTypeIndex()), // type_idx
reinterpret_cast<size_t>(c->GetVirtualMethod(0)), // arbitrary
0U,
- reinterpret_cast<uintptr_t>(GetTlsPtr(self)->quick_entrypoints.pAllocObject),
+ StubTest::GetEntrypoint(self, kQuickAllocObject),
self);
EXPECT_FALSE(self->IsExceptionPending());
@@ -1038,7 +1039,7 @@ TEST_F(StubTest, AllocObject) {
// We can use nullptr in the second argument as we do not need a method here (not used in
// resolved/initialized cases)
size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(nullptr), 0U,
- reinterpret_cast<uintptr_t>(GetTlsPtr(self)->quick_entrypoints.pAllocObjectResolved),
+ StubTest::GetEntrypoint(self, kQuickAllocObjectResolved),
self);
EXPECT_FALSE(self->IsExceptionPending());
@@ -1052,7 +1053,7 @@ TEST_F(StubTest, AllocObject) {
// We can use nullptr in the second argument as we do not need a method here (not used in
// resolved/initialized cases)
size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(nullptr), 0U,
- reinterpret_cast<uintptr_t>(GetTlsPtr(self)->quick_entrypoints.pAllocObjectInitialized),
+ StubTest::GetEntrypoint(self, kQuickAllocObjectInitialized),
self);
EXPECT_FALSE(self->IsExceptionPending());
@@ -1108,7 +1109,7 @@ TEST_F(StubTest, AllocObject) {
self->ClearException();
size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(nullptr), 0U,
- reinterpret_cast<uintptr_t>(GetTlsPtr(self)->quick_entrypoints.pAllocObjectInitialized),
+ StubTest::GetEntrypoint(self, kQuickAllocObjectInitialized),
self);
EXPECT_TRUE(self->IsExceptionPending());
self->ClearException();
@@ -1154,7 +1155,7 @@ TEST_F(StubTest, AllocObjectArray) {
size_t result = Invoke3(static_cast<size_t>(c->GetDexTypeIndex()), // type_idx
reinterpret_cast<size_t>(c_obj->GetVirtualMethod(0)), // arbitrary
10U,
- reinterpret_cast<uintptr_t>(GetTlsPtr(self)->quick_entrypoints.pAllocArray),
+ StubTest::GetEntrypoint(self, kQuickAllocArray),
self);
EXPECT_FALSE(self->IsExceptionPending());
@@ -1169,7 +1170,7 @@ TEST_F(StubTest, AllocObjectArray) {
// We can use nullptr in the second argument as we do not need a method here (not used in
// resolved/initialized cases)
size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(nullptr), 10U,
- reinterpret_cast<uintptr_t>(GetTlsPtr(self)->quick_entrypoints.pAllocArrayResolved),
+ StubTest::GetEntrypoint(self, kQuickAllocArrayResolved),
self);
EXPECT_FALSE(self->IsExceptionPending()) << PrettyTypeOf(self->GetException(nullptr));
EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
@@ -1188,7 +1189,7 @@ TEST_F(StubTest, AllocObjectArray) {
{
size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(nullptr),
GB, // that should fail...
- reinterpret_cast<uintptr_t>(GetTlsPtr(self)->quick_entrypoints.pAllocArrayResolved),
+ StubTest::GetEntrypoint(self, kQuickAllocArrayResolved),
self);
EXPECT_TRUE(self->IsExceptionPending());
@@ -1205,10 +1206,6 @@ TEST_F(StubTest, AllocObjectArray) {
}
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
-extern "C" void art_quick_string_compareto(void);
-#endif
-
TEST_F(StubTest, StringCompareTo) {
TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
@@ -1216,6 +1213,9 @@ TEST_F(StubTest, StringCompareTo) {
// TODO: Check the "Unresolved" allocation stubs
Thread* self = Thread::Current();
+
+ const uintptr_t art_quick_string_compareto = StubTest::GetEntrypoint(self, kQuickStringCompareTo);
+
ScopedObjectAccess soa(self);
// garbage is created during ClassLinker::Init
@@ -1274,7 +1274,7 @@ TEST_F(StubTest, StringCompareTo) {
// Test string_compareto x y
size_t result = Invoke3(reinterpret_cast<size_t>(s[x].Get()),
reinterpret_cast<size_t>(s[y].Get()), 0U,
- reinterpret_cast<uintptr_t>(&art_quick_string_compareto), self);
+ art_quick_string_compareto, self);
EXPECT_FALSE(self->IsExceptionPending());
@@ -1306,11 +1306,6 @@ TEST_F(StubTest, StringCompareTo) {
}
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
-extern "C" void art_quick_set32_static(void);
-extern "C" void art_quick_get32_static(void);
-#endif
-
static void GetSet32Static(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -1322,13 +1317,13 @@ static void GetSet32Static(Handle<mirror::Object>* obj, Handle<mirror::ArtField>
test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
static_cast<size_t>(values[i]),
0U,
- reinterpret_cast<uintptr_t>(&art_quick_set32_static),
+ StubTest::GetEntrypoint(self, kQuickSet32Static),
self,
referrer);
size_t res = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
0U, 0U,
- reinterpret_cast<uintptr_t>(&art_quick_get32_static),
+ StubTest::GetEntrypoint(self, kQuickGet32Static),
self,
referrer);
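[Editor's note: the field tests all follow the same round-trip shape — push each probe value through the set stub, pull it back through the get stub, and require equality. The pattern in miniature, with plain functions standing in for the stubs:]

    #include <cassert>
    #include <cstdint>

    int32_t g_static_field = 0;  // stand-in for the static field

    void Set32Static(int32_t v) { g_static_field = v; }
    int32_t Get32Static() { return g_static_field; }

    void RoundTrip32() {
      constexpr int32_t values[] = {-1, 0, 1, 2, 0x7fffffff};
      for (int32_t v : values) {
        Set32Static(v);
        assert(Get32Static() == v);
      }
    }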
@@ -1342,11 +1337,6 @@ static void GetSet32Static(Handle<mirror::Object>* obj, Handle<mirror::ArtField>
}
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
-extern "C" void art_quick_set32_instance(void);
-extern "C" void art_quick_get32_instance(void);
-#endif
-
static void GetSet32Instance(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f,
Thread* self, mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -1358,7 +1348,7 @@ static void GetSet32Instance(Handle<mirror::Object>* obj, Handle<mirror::ArtFiel
test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
reinterpret_cast<size_t>(obj->Get()),
static_cast<size_t>(values[i]),
- reinterpret_cast<uintptr_t>(&art_quick_set32_instance),
+ StubTest::GetEntrypoint(self, kQuickSet32Instance),
self,
referrer);
@@ -1371,7 +1361,7 @@ static void GetSet32Instance(Handle<mirror::Object>* obj, Handle<mirror::ArtFiel
size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
reinterpret_cast<size_t>(obj->Get()),
0U,
- reinterpret_cast<uintptr_t>(&art_quick_get32_instance),
+ StubTest::GetEntrypoint(self, kQuickGet32Instance),
self,
referrer);
EXPECT_EQ(res, static_cast<int32_t>(res2));
@@ -1385,8 +1375,6 @@ static void GetSet32Instance(Handle<mirror::Object>* obj, Handle<mirror::ArtFiel
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
-extern "C" void art_quick_set_obj_static(void);
-extern "C" void art_quick_get_obj_static(void);
static void set_and_check_static(uint32_t f_idx, mirror::Object* val, Thread* self,
mirror::ArtMethod* referrer, StubTest* test)
@@ -1394,13 +1382,13 @@ static void set_and_check_static(uint32_t f_idx, mirror::Object* val, Thread* se
test->Invoke3WithReferrer(static_cast<size_t>(f_idx),
reinterpret_cast<size_t>(val),
0U,
- reinterpret_cast<uintptr_t>(&art_quick_set_obj_static),
+ StubTest::GetEntrypoint(self, kQuickSetObjStatic),
self,
referrer);
size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f_idx),
0U, 0U,
- reinterpret_cast<uintptr_t>(&art_quick_get_obj_static),
+ StubTest::GetEntrypoint(self, kQuickGetObjStatic),
self,
referrer);
@@ -1428,9 +1416,6 @@ static void GetSetObjStatic(Handle<mirror::Object>* obj, Handle<mirror::ArtField
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
-extern "C" void art_quick_set_obj_instance(void);
-extern "C" void art_quick_get_obj_instance(void);
-
static void set_and_check_instance(Handle<mirror::ArtField>* f, mirror::Object* trg,
mirror::Object* val, Thread* self, mirror::ArtMethod* referrer,
StubTest* test)
@@ -1438,14 +1423,14 @@ static void set_and_check_instance(Handle<mirror::ArtField>* f, mirror::Object*
test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
reinterpret_cast<size_t>(trg),
reinterpret_cast<size_t>(val),
- reinterpret_cast<uintptr_t>(&art_quick_set_obj_instance),
+ StubTest::GetEntrypoint(self, kQuickSetObjInstance),
self,
referrer);
size_t res = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
reinterpret_cast<size_t>(trg),
0U,
- reinterpret_cast<uintptr_t>(&art_quick_get_obj_instance),
+ StubTest::GetEntrypoint(self, kQuickGetObjInstance),
self,
referrer);
@@ -1476,11 +1461,6 @@ static void GetSetObjInstance(Handle<mirror::Object>* obj, Handle<mirror::ArtFie
// TODO: Complete these tests for 32b architectures.
-#if (defined(__x86_64__) && !defined(__APPLE__)) || defined(__aarch64__)
-extern "C" void art_quick_set64_static(void);
-extern "C" void art_quick_get64_static(void);
-#endif
-
static void GetSet64Static(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -1491,13 +1471,13 @@ static void GetSet64Static(Handle<mirror::Object>* obj, Handle<mirror::ArtField>
for (size_t i = 0; i < num_values; ++i) {
test->Invoke3UWithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
values[i],
- reinterpret_cast<uintptr_t>(&art_quick_set64_static),
+ StubTest::GetEntrypoint(self, kQuickSet64Static),
self,
referrer);
size_t res = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
0U, 0U,
- reinterpret_cast<uintptr_t>(&art_quick_get64_static),
+ StubTest::GetEntrypoint(self, kQuickGet64Static),
self,
referrer);
@@ -1511,11 +1491,6 @@ static void GetSet64Static(Handle<mirror::Object>* obj, Handle<mirror::ArtField>
}
-#if (defined(__x86_64__) && !defined(__APPLE__)) || defined(__aarch64__)
-extern "C" void art_quick_set64_instance(void);
-extern "C" void art_quick_get64_instance(void);
-#endif
-
static void GetSet64Instance(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f,
Thread* self, mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -1527,7 +1502,7 @@ static void GetSet64Instance(Handle<mirror::Object>* obj, Handle<mirror::ArtFiel
test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
reinterpret_cast<size_t>(obj->Get()),
static_cast<size_t>(values[i]),
- reinterpret_cast<uintptr_t>(&art_quick_set64_instance),
+ StubTest::GetEntrypoint(self, kQuickSet64Instance),
self,
referrer);
@@ -1540,7 +1515,7 @@ static void GetSet64Instance(Handle<mirror::Object>* obj, Handle<mirror::ArtFiel
size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
reinterpret_cast<size_t>(obj->Get()),
0U,
- reinterpret_cast<uintptr_t>(&art_quick_get64_instance),
+ StubTest::GetEntrypoint(self, kQuickGet64Instance),
self,
referrer);
EXPECT_EQ(res, static_cast<int64_t>(res2));
@@ -1683,9 +1658,6 @@ TEST_F(StubTest, Fields64) {
TestFields(self, this, Primitive::Type::kPrimLong);
}
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
-extern "C" void art_quick_imt_conflict_trampoline(void);
-#endif
TEST_F(StubTest, IMT) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
@@ -1716,7 +1688,7 @@ TEST_F(StubTest, IMT) {
// Patch up ArrayList.contains.
if (contains_amethod.Get()->GetEntryPointFromQuickCompiledCode() == nullptr) {
contains_amethod.Get()->SetEntryPointFromQuickCompiledCode(reinterpret_cast<void*>(
- GetTlsPtr(self)->quick_entrypoints.pQuickToInterpreterBridge));
+ StubTest::GetEntrypoint(self, kQuickQuickToInterpreterBridge)));
}
// List
@@ -1765,7 +1737,7 @@ TEST_F(StubTest, IMT) {
size_t result =
Invoke3WithReferrerAndHidden(0U, reinterpret_cast<size_t>(array_list.Get()),
reinterpret_cast<size_t>(obj.Get()),
- reinterpret_cast<uintptr_t>(&art_quick_imt_conflict_trampoline),
+ StubTest::GetEntrypoint(self, kQuickQuickImtConflictTrampoline),
self, contains_amethod.Get(),
static_cast<size_t>(inf_contains.Get()->GetDexMethodIndex()));
@@ -1782,7 +1754,7 @@ TEST_F(StubTest, IMT) {
result = Invoke3WithReferrerAndHidden(0U, reinterpret_cast<size_t>(array_list.Get()),
reinterpret_cast<size_t>(obj.Get()),
- reinterpret_cast<uintptr_t>(&art_quick_imt_conflict_trampoline),
+ StubTest::GetEntrypoint(self, kQuickQuickImtConflictTrampoline),
self, contains_amethod.Get(),
static_cast<size_t>(inf_contains.Get()->GetDexMethodIndex()));
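[Editor's note: the hidden argument here is the callee's dex method index, because several interface methods can hash to the same IMT slot and the conflict stub must disambiguate. A toy model of that dispatch; illustrative only, ART's real resolution differs:]

    #include <cstdint>
    #include <map>

    using Code = void (*)();

    struct ToyConflictTable {
      std::map<uint32_t, Code> by_dex_method_index;

      Code Resolve(uint32_t dex_method_index) const {
        auto it = by_dex_method_index.find(dex_method_index);
        return it == by_dex_method_index.end() ? nullptr : it->second;
      }
    };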
@@ -1795,10 +1767,6 @@ TEST_F(StubTest, IMT) {
#endif
}
-#if defined(__arm__) || defined(__aarch64__)
-extern "C" void art_quick_indexof(void);
-#endif
-
TEST_F(StubTest, StringIndexOf) {
#if defined(__arm__) || defined(__aarch64__)
TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
@@ -1848,7 +1816,7 @@ TEST_F(StubTest, StringIndexOf) {
// Test string_indexof x y start
size_t result = Invoke3(reinterpret_cast<size_t>(s[x].Get()), c_char[y], start,
- reinterpret_cast<uintptr_t>(&art_quick_indexof), self);
+ StubTest::GetEntrypoint(self, kQuickIndexOf), self);
EXPECT_FALSE(self->IsExceptionPending());
diff --git a/runtime/arch/x86/asm_support_x86.S b/runtime/arch/x86/asm_support_x86.S
index 96c2c05e35..a578023c4c 100644
--- a/runtime/arch/x86/asm_support_x86.S
+++ b/runtime/arch/x86/asm_support_x86.S
@@ -112,6 +112,7 @@
#define PLT_SYMBOL(name) _ ## name
#endif
+// Directive to hide a function symbol.
#if defined(__APPLE__)
#define ASM_HIDDEN .private_extern
#else
@@ -125,6 +126,17 @@ END_MACRO
MACRO1(DEFINE_FUNCTION, c_name)
FUNCTION_TYPE(\c_name, 0)
+ ASM_HIDDEN VAR(c_name, 0)
+ .globl VAR(c_name, 0)
+ ALIGN_FUNCTION_ENTRY
+VAR(c_name, 0):
+ CFI_STARTPROC
+ // Ensure we get a sane starting CFA.
+ CFI_DEF_CFA(esp, 4)
+END_MACRO
+
+MACRO1(DEFINE_FUNCTION_NO_HIDE, c_name)
+ FUNCTION_TYPE(\c_name, 0)
.globl VAR(c_name, 0)
ALIGN_FUNCTION_ENTRY
VAR(c_name, 0):
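[Editor's note: DEFINE_FUNCTION now hides its symbol by default, and DEFINE_FUNCTION_NO_HIDE is the escape hatch for bridges whose addresses must stay reachable from outside the library. The same split expressed with compiler attributes, as an illustrative C++ analogue:]

    extern "C" __attribute__((visibility("hidden")))
    void hidden_entrypoint_example() {}

    extern "C" __attribute__((visibility("default")))
    void visible_bridge_example() {}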
diff --git a/runtime/arch/x86/fault_handler_x86.cc b/runtime/arch/x86/fault_handler_x86.cc
index c143c5d825..65a48f6b20 100644
--- a/runtime/arch/x86/fault_handler_x86.cc
+++ b/runtime/arch/x86/fault_handler_x86.cc
@@ -66,7 +66,7 @@ namespace art {
#if defined(__APPLE__) && defined(__x86_64__)
// mac symbols have a prefix of _ on x86_64
extern "C" void _art_quick_throw_null_pointer_exception();
-extern "C" void _art_quick_throw_stack_overflow_from_signal();
+extern "C" void _art_quick_throw_stack_overflow();
extern "C" void _art_quick_test_suspend();
#define EXT_SYM(sym) _ ## sym
#else
@@ -395,7 +395,7 @@ bool StackOverflowHandler::Action(int sig, siginfo_t* info, void* context) {
// the previous frame.
// Now arrange for the signal handler to return to art_quick_throw_stack_overflow.
- uc->CTX_EIP = reinterpret_cast<uintptr_t>(art_quick_throw_stack_overflow);
+ uc->CTX_EIP = reinterpret_cast<uintptr_t>(EXT_SYM(art_quick_throw_stack_overflow));
return true;
}
diff --git a/runtime/arch/x86/portable_entrypoints_x86.S b/runtime/arch/x86/portable_entrypoints_x86.S
index 5f270f8087..9365795fd6 100644
--- a/runtime/arch/x86/portable_entrypoints_x86.S
+++ b/runtime/arch/x86/portable_entrypoints_x86.S
@@ -115,7 +115,7 @@ DEFINE_FUNCTION art_portable_resolution_trampoline
ret
END_FUNCTION art_portable_resolution_trampoline
-DEFINE_FUNCTION art_portable_to_interpreter_bridge
+DEFINE_FUNCTION_NO_HIDE art_portable_to_interpreter_bridge
PUSH ebp // Set up frame.
movl %esp, %ebp
CFI_DEF_CFA_REGISTER(%ebp)
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 75ec49deb0..2f3e31797d 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -106,7 +106,7 @@ MACRO0(DELIVER_PENDING_EXCEPTION)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
SETUP_GOT_NOSAVE // clobbers ebx (harmless here)
- call PLT_SYMBOL(artDeliverPendingExceptionFromCode) // artDeliverPendingExceptionFromCode(Thread*, SP)
+ call SYMBOL(artDeliverPendingExceptionFromCode) // artDeliverPendingExceptionFromCode(Thread*, SP)
int3 // unreached
END_MACRO
@@ -121,7 +121,7 @@ MACRO2(NO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
SETUP_GOT_NOSAVE // clobbers ebx (harmless here)
- call PLT_VAR(cxx_name, 1) // cxx_name(Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(Thread*, SP)
int3 // unreached
END_FUNCTION RAW_VAR(c_name, 0)
END_MACRO
@@ -137,7 +137,7 @@ MACRO2(ONE_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
CFI_ADJUST_CFA_OFFSET(4)
PUSH eax // pass arg1
SETUP_GOT_NOSAVE // clobbers ebx (harmless here)
- call PLT_VAR(cxx_name, 1) // cxx_name(arg1, Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(arg1, Thread*, SP)
int3 // unreached
END_FUNCTION RAW_VAR(c_name, 0)
END_MACRO
@@ -153,7 +153,7 @@ MACRO2(TWO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
PUSH ecx // pass arg2
PUSH eax // pass arg1
SETUP_GOT_NOSAVE // clobbers ebx (harmless here)
- call PLT_VAR(cxx_name, 1) // cxx_name(arg1, arg2, Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(arg1, arg2, Thread*, SP)
int3 // unreached
END_FUNCTION RAW_VAR(c_name, 0)
END_MACRO
@@ -161,7 +161,6 @@ END_MACRO
/*
* Called by managed code to create and deliver a NullPointerException.
*/
- ASM_HIDDEN art_quick_throw_null_pointer_exception
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception, artThrowNullPointerExceptionFromCode
/*
@@ -189,7 +188,6 @@ ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method, artThrowNoSuchMethodFr
* Called by managed code to create and deliver an ArrayIndexOutOfBoundsException. Arg1 holds
* index, arg2 holds limit.
*/
- ASM_HIDDEN art_quick_throw_array_bounds
TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds, artThrowArrayBoundsFromCode
/*
@@ -231,7 +229,7 @@ MACRO2(INVOKE_TRAMPOLINE, c_name, cxx_name)
CFI_ADJUST_CFA_OFFSET(4)
PUSH ecx // pass arg2
PUSH eax // pass arg1
- call PLT_VAR(cxx_name, 1) // cxx_name(arg1, arg2, arg3, Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(arg1, arg2, arg3, Thread*, SP)
movl %edx, %edi // save code pointer in EDI
addl MACRO_LITERAL(36), %esp // Pop arguments, skipping eax
CFI_ADJUST_CFA_OFFSET(-36)
@@ -253,7 +251,6 @@ MACRO2(INVOKE_TRAMPOLINE, c_name, cxx_name)
END_FUNCTION RAW_VAR(c_name, 0)
END_MACRO
- ASM_HIDDEN art_quick_invoke_interface_trampoline
INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline, artInvokeInterfaceTrampoline
INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck
@@ -328,7 +325,7 @@ MACRO3(NO_ARG_DOWNCALL, c_name, cxx_name, return_macro)
PUSH edx // pass SP
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
- call PLT_VAR(cxx_name, 1) // cxx_name(Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(Thread*, SP)
addl MACRO_LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
@@ -347,7 +344,7 @@ MACRO3(ONE_ARG_DOWNCALL, c_name, cxx_name, return_macro)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
PUSH eax // pass arg1
- call PLT_VAR(cxx_name, 1) // cxx_name(arg1, Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(arg1, Thread*, SP)
addl MACRO_LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
@@ -366,7 +363,7 @@ MACRO3(TWO_ARG_DOWNCALL, c_name, cxx_name, return_macro)
CFI_ADJUST_CFA_OFFSET(4)
PUSH ecx // pass arg2
PUSH eax // pass arg1
- call PLT_VAR(cxx_name, 1) // cxx_name(arg1, arg2, Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(arg1, arg2, Thread*, SP)
addl MACRO_LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
@@ -388,7 +385,7 @@ MACRO3(THREE_ARG_DOWNCALL, c_name, cxx_name, return_macro)
PUSH ecx // pass arg2
PUSH eax // pass arg1
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_VAR(cxx_name, 1) // cxx_name(arg1, arg2, arg3, Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(arg1, arg2, arg3, Thread*, SP)
addl MACRO_LITERAL(32), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-32)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
@@ -569,7 +566,7 @@ DEFINE_FUNCTION art_quick_lock_object
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
PUSH eax // pass object
- call PLT_SYMBOL(artLockObjectFromCode) // artLockObjectFromCode(object, Thread*, SP)
+ call SYMBOL(artLockObjectFromCode) // artLockObjectFromCode(object, Thread*, SP)
addl LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
@@ -603,7 +600,7 @@ DEFINE_FUNCTION art_quick_unlock_object
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
PUSH eax // pass object
- call PLT_SYMBOL(artUnlockObjectFromCode) // artUnlockObjectFromCode(object, Thread*, SP)
+ call SYMBOL(artUnlockObjectFromCode) // artUnlockObjectFromCode(object, Thread*, SP)
addl LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
@@ -615,7 +612,7 @@ DEFINE_FUNCTION art_quick_is_assignable
PUSH eax // alignment padding
PUSH ecx // pass arg2 - obj->klass
PUSH eax // pass arg1 - checked class
- call PLT_SYMBOL(artIsAssignableFromCode) // (Class* klass, Class* ref_klass)
+ call SYMBOL(artIsAssignableFromCode) // (Class* klass, Class* ref_klass)
addl LITERAL(12), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-12)
ret
@@ -626,7 +623,7 @@ DEFINE_FUNCTION art_quick_check_cast
PUSH eax // alignment padding
PUSH ecx // pass arg2 - obj->klass
PUSH eax // pass arg1 - checked class
- call PLT_SYMBOL(artIsAssignableFromCode) // (Class* klass, Class* ref_klass)
+ call SYMBOL(artIsAssignableFromCode) // (Class* klass, Class* ref_klass)
testl %eax, %eax
jz 1f // jump forward if not assignable
addl LITERAL(12), %esp // pop arguments
@@ -645,7 +642,7 @@ DEFINE_FUNCTION art_quick_check_cast
CFI_ADJUST_CFA_OFFSET(4)
PUSH ecx // pass arg2
PUSH eax // pass arg1
- call PLT_SYMBOL(artThrowClassCastException) // (Class* a, Class* b, Thread*, SP)
+ call SYMBOL(artThrowClassCastException) // (Class* a, Class* b, Thread*, SP)
int3 // unreached
END_FUNCTION art_quick_check_cast
@@ -660,7 +657,6 @@ DEFINE_FUNCTION art_quick_aput_obj_with_null_and_bound_check
jmp SYMBOL(art_quick_throw_null_pointer_exception)
END_FUNCTION art_quick_aput_obj_with_null_and_bound_check
- ASM_HIDDEN art_quick_aput_obj_with_bound_check
DEFINE_FUNCTION art_quick_aput_obj_with_bound_check
movl ARRAY_LENGTH_OFFSET(%eax), %ebx
cmpl %ebx, %ecx
@@ -670,7 +666,6 @@ DEFINE_FUNCTION art_quick_aput_obj_with_bound_check
jmp SYMBOL(art_quick_throw_array_bounds)
END_FUNCTION art_quick_aput_obj_with_bound_check
- ASM_HIDDEN art_quick_aput_obj
DEFINE_FUNCTION art_quick_aput_obj
test %edx, %edx // store of null
jz .Ldo_aput_null
@@ -697,7 +692,7 @@ DEFINE_FUNCTION art_quick_aput_obj
CFI_ADJUST_CFA_OFFSET(4)
PUSH ebx // pass arg1 - component type of the array
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(artIsAssignableFromCode) // (Class* a, Class* b)
+ call SYMBOL(artIsAssignableFromCode) // (Class* a, Class* b)
addl LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
testl %eax, %eax
@@ -722,7 +717,7 @@ DEFINE_FUNCTION art_quick_aput_obj
CFI_ADJUST_CFA_OFFSET(4)
PUSH edx // pass arg2 - value
PUSH eax // pass arg1 - array
- call PLT_SYMBOL(artThrowArrayStoreException) // (array, value, Thread*, SP)
+ call SYMBOL(artThrowArrayStoreException) // (array, value, Thread*, SP)
int3 // unreached
END_FUNCTION art_quick_aput_obj
@@ -744,7 +739,7 @@ DEFINE_FUNCTION art_quick_d2l
PUSH ecx // pass arg2 a.hi
PUSH eax // pass arg1 a.lo
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(art_d2l) // (jdouble a)
+ call SYMBOL(art_d2l) // (jdouble a)
addl LITERAL(12), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-12)
ret
@@ -755,7 +750,7 @@ DEFINE_FUNCTION art_quick_f2l
CFI_ADJUST_CFA_OFFSET(8)
SETUP_GOT_NOSAVE // clobbers EBX
PUSH eax // pass arg1 a
- call PLT_SYMBOL(art_f2l) // (jfloat a)
+ call SYMBOL(art_f2l) // (jfloat a)
addl LITERAL(12), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-12)
ret
@@ -769,7 +764,7 @@ DEFINE_FUNCTION art_quick_ldiv
PUSH ecx // pass arg2 a.hi
PUSH eax // pass arg1 a.lo
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(artLdiv) // (jlong a, jlong b)
+ call SYMBOL(artLdiv) // (jlong a, jlong b)
addl LITERAL(28), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-28)
ret
@@ -783,7 +778,7 @@ DEFINE_FUNCTION art_quick_lmod
PUSH ecx // pass arg2 a.hi
PUSH eax // pass arg1 a.lo
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(artLmod) // (jlong a, jlong b)
+ call SYMBOL(artLmod) // (jlong a, jlong b)
addl LITERAL(28), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-28)
ret
@@ -851,7 +846,7 @@ DEFINE_FUNCTION art_quick_set32_instance
PUSH ecx // pass object
PUSH eax // pass field_idx
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(artSet32InstanceFromCode) // (field_idx, Object*, new_val, referrer, Thread*, SP)
+ call SYMBOL(artSet32InstanceFromCode) // (field_idx, Object*, new_val, referrer, Thread*, SP)
addl LITERAL(32), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-32)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
@@ -871,7 +866,7 @@ DEFINE_FUNCTION art_quick_set64_instance
PUSH ecx // pass object
PUSH eax // pass field_idx
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(artSet64InstanceFromCode) // (field_idx, Object*, new_val, Thread*, SP)
+ call SYMBOL(artSet64InstanceFromCode) // (field_idx, Object*, new_val, Thread*, SP)
addl LITERAL(32), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-32)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
@@ -892,7 +887,7 @@ DEFINE_FUNCTION art_quick_set_obj_instance
PUSH ecx // pass object
PUSH eax // pass field_idx
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(artSetObjInstanceFromCode) // (field_idx, Object*, new_val, referrer, Thread*, SP)
+ call SYMBOL(artSetObjInstanceFromCode) // (field_idx, Object*, new_val, referrer, Thread*, SP)
addl LITERAL(32), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-32)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
@@ -912,7 +907,7 @@ DEFINE_FUNCTION art_quick_get32_instance
PUSH ecx // pass object
PUSH eax // pass field_idx
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(artGet32InstanceFromCode) // (field_idx, Object*, referrer, Thread*, SP)
+ call SYMBOL(artGet32InstanceFromCode) // (field_idx, Object*, referrer, Thread*, SP)
addl LITERAL(32), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-32)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
@@ -932,7 +927,7 @@ DEFINE_FUNCTION art_quick_get64_instance
PUSH ecx // pass object
PUSH eax // pass field_idx
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(artGet64InstanceFromCode) // (field_idx, Object*, referrer, Thread*, SP)
+ call SYMBOL(artGet64InstanceFromCode) // (field_idx, Object*, referrer, Thread*, SP)
addl LITERAL(32), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-32)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
@@ -952,7 +947,7 @@ DEFINE_FUNCTION art_quick_get_obj_instance
PUSH ecx // pass object
PUSH eax // pass field_idx
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(artGetObjInstanceFromCode) // (field_idx, Object*, referrer, Thread*, SP)
+ call SYMBOL(artGetObjInstanceFromCode) // (field_idx, Object*, referrer, Thread*, SP)
addl LITERAL(32), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-32)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
@@ -972,7 +967,7 @@ DEFINE_FUNCTION art_quick_set32_static
PUSH ecx // pass new_val
PUSH eax // pass field_idx
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(artSet32StaticFromCode) // (field_idx, new_val, referrer, Thread*, SP)
+ call SYMBOL(artSet32StaticFromCode) // (field_idx, new_val, referrer, Thread*, SP)
addl LITERAL(32), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-32)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
@@ -993,7 +988,7 @@ DEFINE_FUNCTION art_quick_set64_static
PUSH ebx // pass referrer
PUSH eax // pass field_idx
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(artSet64StaticFromCode) // (field_idx, referrer, new_val, Thread*, SP)
+ call SYMBOL(artSet64StaticFromCode) // (field_idx, referrer, new_val, Thread*, SP)
addl LITERAL(32), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-32)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
@@ -1013,7 +1008,7 @@ DEFINE_FUNCTION art_quick_set_obj_static
PUSH ecx // pass new_val
PUSH eax // pass field_idx
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(artSetObjStaticFromCode) // (field_idx, new_val, referrer, Thread*, SP)
+ call SYMBOL(artSetObjStaticFromCode) // (field_idx, new_val, referrer, Thread*, SP)
addl LITERAL(32), %esp // pop arguments
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO // return or deliver exception
@@ -1029,7 +1024,7 @@ DEFINE_FUNCTION art_quick_get32_static
PUSH ecx // pass referrer
PUSH eax // pass field_idx
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(artGet32StaticFromCode) // (field_idx, referrer, Thread*, SP)
+ call SYMBOL(artGet32StaticFromCode) // (field_idx, referrer, Thread*, SP)
addl LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
@@ -1046,7 +1041,7 @@ DEFINE_FUNCTION art_quick_get64_static
PUSH ecx // pass referrer
PUSH eax // pass field_idx
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(artGet64StaticFromCode) // (field_idx, referrer, Thread*, SP)
+ call SYMBOL(artGet64StaticFromCode) // (field_idx, referrer, Thread*, SP)
addl LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
@@ -1063,7 +1058,7 @@ DEFINE_FUNCTION art_quick_get_obj_static
PUSH ecx // pass referrer
PUSH eax // pass field_idx
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(artGetObjStaticFromCode) // (field_idx, referrer, Thread*, SP)
+ call SYMBOL(artGetObjStaticFromCode) // (field_idx, referrer, Thread*, SP)
addl LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
@@ -1078,7 +1073,7 @@ DEFINE_FUNCTION art_quick_proxy_invoke_handler
PUSH ecx // pass receiver
PUSH eax // pass proxy method
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(artQuickProxyInvokeHandler) // (proxy method, receiver, Thread*, SP)
+ call SYMBOL(artQuickProxyInvokeHandler) // (proxy method, receiver, Thread*, SP)
movd %eax, %xmm0 // place return value also into floating point return value
movd %edx, %xmm1
punpckldq %xmm1, %xmm0
@@ -1110,7 +1105,7 @@ DEFINE_FUNCTION art_quick_resolution_trampoline
PUSH ecx // pass receiver
PUSH eax // pass method
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(artQuickResolutionTrampoline) // (Method* called, receiver, Thread*, SP)
+ call SYMBOL(artQuickResolutionTrampoline) // (Method* called, receiver, Thread*, SP)
movl %eax, %edi // remember code pointer in EDI
addl LITERAL(16), %esp // pop arguments
test %eax, %eax // if code pointer is NULL goto deliver pending exception
@@ -1128,7 +1123,7 @@ DEFINE_FUNCTION art_quick_resolution_trampoline
DELIVER_PENDING_EXCEPTION
END_FUNCTION art_quick_resolution_trampoline
-DEFINE_FUNCTION art_quick_generic_jni_trampoline
+DEFINE_FUNCTION_NO_HIDE art_quick_generic_jni_trampoline
SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
// This also stores the native ArtMethod reference at the bottom of the stack.
@@ -1145,7 +1140,7 @@ DEFINE_FUNCTION art_quick_generic_jni_trampoline
pushl %ebp // Pass SP (to ArtMethod).
pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current().
SETUP_GOT_NOSAVE // Clobbers ebx.
- call PLT_SYMBOL(artQuickGenericJniTrampoline) // (Thread*, sp)
+ call SYMBOL(artQuickGenericJniTrampoline) // (Thread*, sp)
// The C call will have registered the complete save-frame on success.
// The result of the call is:
@@ -1168,14 +1163,14 @@ DEFINE_FUNCTION art_quick_generic_jni_trampoline
// prepare for artQuickGenericJniEndTrampoline call
// (Thread*, result, result_f)
// (esp) 4(esp) 12(esp) <= C calling convention
- // fs:... eax:edx xmm0 <= where they are
+ // fs:... eax:edx fp0 <= where they are
subl LITERAL(20), %esp // Padding & pass float result.
- movsd %xmm0, (%esp)
+ fstpl (%esp)
pushl %edx // Pass int result.
pushl %eax
pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current().
- call PLT_SYMBOL(artQuickGenericJniEndTrampoline)
+ call SYMBOL(artQuickGenericJniEndTrampoline)
// Tear down the alloca.
movl %ebp, %esp
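[Editor's note: the switch from movsd to fstpl reflects the 32-bit x86 native ABI, which returns floating-point values in the x87 register st(0) rather than xmm0; the trampoline must pop st(0) to the stack before handing the result on. A hedged C++ sketch, with a hypothetical native callee:]

    extern "C" double native_returns_double() { return 1.5; }

    double GrabFpResult() {
      // On ia32 the compiler typically emits fstpl (fstp m64) for this
      // store, matching the hand-written trampoline above.
      double result = native_returns_double();
      return result;
    }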
@@ -1196,7 +1191,7 @@ DEFINE_FUNCTION art_quick_generic_jni_trampoline
POP ebp // Restore callee saves
POP esi
POP edi
- // store into fpr, for when it's a fpr return...
+ // Quick expects the return value to be in xmm0.
movd %eax, %xmm0
movd %edx, %xmm1
punpckldq %xmm1, %xmm0
@@ -1209,7 +1204,7 @@ DEFINE_FUNCTION art_quick_generic_jni_trampoline
DELIVER_PENDING_EXCEPTION
END_FUNCTION art_quick_generic_jni_trampoline
-DEFINE_FUNCTION art_quick_to_interpreter_bridge
+DEFINE_FUNCTION_NO_HIDE art_quick_to_interpreter_bridge
SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME // save frame
mov %esp, %edx // remember SP
PUSH eax // alignment padding
@@ -1218,7 +1213,7 @@ DEFINE_FUNCTION art_quick_to_interpreter_bridge
CFI_ADJUST_CFA_OFFSET(4)
PUSH eax // pass method
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(artQuickToInterpreterBridge) // (method, Thread*, SP)
+ call SYMBOL(artQuickToInterpreterBridge) // (method, Thread*, SP)
movd %eax, %xmm0 // place return value also into floating point return value
movd %edx, %xmm1
punpckldq %xmm1, %xmm0
@@ -1245,7 +1240,7 @@ DEFINE_FUNCTION art_quick_instrumentation_entry
PUSH ecx // Pass receiver.
PUSH eax // Pass Method*.
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(artInstrumentationMethodEntryFromCode) // (Method*, Object*, Thread*, SP, LR)
+ call SYMBOL(artInstrumentationMethodEntryFromCode) // (Method*, Object*, Thread*, SP, LR)
addl LITERAL(28), %esp // Pop arguments up to saved Method*.
movl 28(%esp), %edi // Restore edi.
movl %eax, 28(%esp) // Place code* over edi, just under return pc.
@@ -1280,7 +1275,7 @@ DEFINE_FUNCTION art_quick_instrumentation_exit
pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current.
CFI_ADJUST_CFA_OFFSET(4)
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(artInstrumentationMethodExitFromCode) // (Thread*, SP, gpr_result, fpr_result)
+ call SYMBOL(artInstrumentationMethodExitFromCode) // (Thread*, SP, gpr_result, fpr_result)
mov %eax, %ecx // Move returned link register.
addl LITERAL(32), %esp // Pop arguments.
CFI_ADJUST_CFA_OFFSET(-32)
@@ -1310,7 +1305,7 @@ DEFINE_FUNCTION art_quick_deoptimize
pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current().
CFI_ADJUST_CFA_OFFSET(4)
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(artDeoptimize) // artDeoptimize(Thread*, SP)
+ call SYMBOL(artDeoptimize) // artDeoptimize(Thread*, SP)
int3 // Unreachable.
END_FUNCTION art_quick_deoptimize
diff --git a/runtime/arch/x86_64/asm_support_x86_64.S b/runtime/arch/x86_64/asm_support_x86_64.S
index 682ba430bd..4ae61a2a65 100644
--- a/runtime/arch/x86_64/asm_support_x86_64.S
+++ b/runtime/arch/x86_64/asm_support_x86_64.S
@@ -107,6 +107,13 @@
#define PLT_SYMBOL(name) _ ## name
#endif
+// Directive to hide a function symbol.
+#if defined(__APPLE__)
+ #define ASM_HIDDEN .private_extern
+#else
+ #define ASM_HIDDEN .hidden
+#endif
+
/* Cache alignment for function entry */
MACRO0(ALIGN_FUNCTION_ENTRY)
.balign 16
@@ -116,13 +123,20 @@ END_MACRO
// for mac builds.
MACRO1(DEFINE_FUNCTION, c_name)
FUNCTION_TYPE(\c_name, 0)
+ ASM_HIDDEN VAR(c_name, 0)
+ .globl VAR(c_name, 0)
+ ALIGN_FUNCTION_ENTRY
+VAR(c_name, 0):
+ CFI_STARTPROC
+ // Ensure we get a sane starting CFA.
+ CFI_DEF_CFA(rsp, 8)
+END_MACRO
+
+MACRO1(DEFINE_FUNCTION_NO_HIDE, c_name)
+ FUNCTION_TYPE(\c_name, 0)
.globl VAR(c_name, 0)
ALIGN_FUNCTION_ENTRY
VAR(c_name, 0):
-#if !defined(__APPLE__)
- // Have a local entrypoint that's not globl
-VAR(c_name, 0)_local:
-#endif
CFI_STARTPROC
// Ensure we get a sane starting CFA.
CFI_DEF_CFA(rsp, 8)
@@ -147,6 +161,19 @@ END_MACRO
MACRO1(UNIMPLEMENTED,name)
FUNCTION_TYPE(\name, 0)
+ ASM_HIDDEN VAR(name, 0)
+ .globl VAR(name, 0)
+ ALIGN_FUNCTION_ENTRY
+VAR(name, 0):
+ CFI_STARTPROC
+ int3
+ int3
+ CFI_ENDPROC
+ SIZE(\name, 0)
+END_MACRO
+
+MACRO1(UNIMPLEMENTED_NO_HIDE,name)
+ FUNCTION_TYPE(\name, 0)
.globl VAR(name, 0)
ALIGN_FUNCTION_ENTRY
VAR(name, 0):
diff --git a/runtime/arch/x86_64/portable_entrypoints_x86_64.S b/runtime/arch/x86_64/portable_entrypoints_x86_64.S
index 2e9d19a899..7b84d178db 100644
--- a/runtime/arch/x86_64/portable_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/portable_entrypoints_x86_64.S
@@ -25,4 +25,4 @@ UNIMPLEMENTED art_portable_proxy_invoke_handler
UNIMPLEMENTED art_portable_resolution_trampoline
-UNIMPLEMENTED art_portable_to_interpreter_bridge
+UNIMPLEMENTED_NO_HIDE art_portable_to_interpreter_bridge
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 48bc240dab..f95bd22e9f 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -232,7 +232,7 @@ MACRO0(DELIVER_PENDING_EXCEPTION)
// (Thread*, SP) setup
movq %gs:THREAD_SELF_OFFSET, %rdi
movq %rsp, %rsi
- call PLT_SYMBOL(artDeliverPendingExceptionFromCode) // artDeliverPendingExceptionFromCode(Thread*, SP)
+ call SYMBOL(artDeliverPendingExceptionFromCode) // artDeliverPendingExceptionFromCode(Thread*, SP)
UNREACHABLE
END_MACRO
@@ -242,7 +242,7 @@ MACRO2(NO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
// Outgoing argument set up
movq %rsp, %rsi // pass SP
movq %gs:THREAD_SELF_OFFSET, %rdi // pass Thread::Current()
- call PLT_VAR(cxx_name, 1) // cxx_name(Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(Thread*, SP)
UNREACHABLE
END_FUNCTION VAR(c_name, 0)
END_MACRO
@@ -253,7 +253,7 @@ MACRO2(ONE_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
// Outgoing argument set up
movq %rsp, %rdx // pass SP
movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
- call PLT_VAR(cxx_name, 1) // cxx_name(arg1, Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(arg1, Thread*, SP)
UNREACHABLE
END_FUNCTION VAR(c_name, 0)
END_MACRO
@@ -264,7 +264,7 @@ MACRO2(TWO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
// Outgoing argument set up
movq %rsp, %rcx // pass SP
movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
- call PLT_VAR(cxx_name, 1) // cxx_name(Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(arg1, arg2, Thread*, SP)
UNREACHABLE
END_FUNCTION VAR(c_name, 0)
END_MACRO
@@ -329,7 +329,7 @@ MACRO2(INVOKE_TRAMPOLINE, c_name, cxx_name)
movq %gs:THREAD_SELF_OFFSET, %rcx // pass Thread
movq %rsp, %r8 // pass SP
- call PLT_VAR(cxx_name, 1) // cxx_name(arg1, arg2, caller method*, Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(arg1, arg2, caller method*, Thread*, SP)
// save the code pointer
movq %rax, %rdi
movq %rdx, %rax
@@ -643,7 +643,7 @@ MACRO3(NO_ARG_DOWNCALL, c_name, cxx_name, return_macro)
// Outgoing argument set up
movq %rsp, %rsi // pass SP
movq %gs:THREAD_SELF_OFFSET, %rdi // pass Thread::Current()
- call PLT_VAR(cxx_name, 1) // cxx_name(Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(Thread*, SP)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro, 2) // return or deliver exception
END_FUNCTION VAR(c_name, 0)
@@ -655,7 +655,7 @@ MACRO3(ONE_ARG_DOWNCALL, c_name, cxx_name, return_macro)
// Outgoing argument set up
movq %rsp, %rdx // pass SP
movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
- call PLT_VAR(cxx_name, 1) // cxx_name(arg0, Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(arg0, Thread*, SP)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro, 2) // return or deliver exception
END_FUNCTION VAR(c_name, 0)
@@ -667,7 +667,7 @@ MACRO3(TWO_ARG_DOWNCALL, c_name, cxx_name, return_macro)
// Outgoing argument set up
movq %rsp, %rcx // pass SP
movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
- call PLT_VAR(cxx_name, 1) // cxx_name(arg0, arg1, Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(arg0, arg1, Thread*, SP)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro, 2) // return or deliver exception
END_FUNCTION VAR(c_name, 0)
@@ -679,7 +679,7 @@ MACRO3(THREE_ARG_DOWNCALL, c_name, cxx_name, return_macro)
// Outgoing argument set up
movq %rsp, %r8 // pass SP
movq %gs:THREAD_SELF_OFFSET, %rcx // pass Thread::Current()
- call PLT_VAR(cxx_name, 1) // cxx_name(arg0, arg1, arg2, Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(arg0, arg1, arg2, Thread*, SP)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro, 2) // return or deliver exception
END_FUNCTION VAR(c_name, 0)
@@ -692,7 +692,7 @@ MACRO3(ONE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
// arg0 is in rdi
movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
movq %rsp, %rcx // pass SP
- call PLT_VAR(cxx_name, 1) // cxx_name(arg0, referrer, Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(arg0, referrer, Thread*, SP)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro, 2)
END_FUNCTION VAR(c_name, 0)
@@ -705,7 +705,7 @@ MACRO3(TWO_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
// arg0 and arg1 are in rdi/rsi
movq %gs:THREAD_SELF_OFFSET, %rcx // pass Thread::Current()
movq %rsp, %r8 // pass SP
- call PLT_VAR(cxx_name, 1) // (arg0, arg1, referrer, Thread*, SP)
+ call VAR(cxx_name, 1) // (arg0, arg1, referrer, Thread*, SP)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro, 2)
END_FUNCTION VAR(c_name, 0)
@@ -718,7 +718,7 @@ MACRO3(THREE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
// arg0, arg1, and arg2 are in rdi/rsi/rdx
movq %gs:THREAD_SELF_OFFSET, %r8 // pass Thread::Current()
movq %rsp, %r9 // pass SP
- call PLT_VAR(cxx_name, 1) // cxx_name(arg0, arg1, arg2, referrer, Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(arg0, arg1, arg2, referrer, Thread*, SP)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro, 2) // return or deliver exception
END_FUNCTION VAR(c_name, 0)
@@ -887,7 +887,7 @@ DEFINE_FUNCTION art_quick_lock_object
SETUP_REF_ONLY_CALLEE_SAVE_FRAME
movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
movq %rsp, %rdx // pass SP
- call PLT_SYMBOL(artLockObjectFromCode) // artLockObjectFromCode(object, Thread*, SP)
+ call SYMBOL(artLockObjectFromCode) // artLockObjectFromCode(object, Thread*, SP)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO
END_FUNCTION art_quick_lock_object
@@ -913,7 +913,7 @@ DEFINE_FUNCTION art_quick_unlock_object
SETUP_REF_ONLY_CALLEE_SAVE_FRAME
movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
movq %rsp, %rdx // pass SP
- call PLT_SYMBOL(artUnlockObjectFromCode) // artUnlockObjectFromCode(object, Thread*, SP)
+ call SYMBOL(artUnlockObjectFromCode) // artUnlockObjectFromCode(object, Thread*, SP)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO
END_FUNCTION art_quick_unlock_object
@@ -922,7 +922,7 @@ DEFINE_FUNCTION art_quick_check_cast
PUSH rdi // Save args for exc
PUSH rsi
SETUP_FP_CALLEE_SAVE_FRAME
- call PLT_SYMBOL(artIsAssignableFromCode) // (Class* klass, Class* ref_klass)
+ call SYMBOL(artIsAssignableFromCode) // (Class* klass, Class* ref_klass)
testq %rax, %rax
jz 1f // jump forward if not assignable
RESTORE_FP_CALLEE_SAVE_FRAME
@@ -937,7 +937,7 @@ DEFINE_FUNCTION art_quick_check_cast
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
mov %rsp, %rcx // pass SP
mov %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
- call PLT_SYMBOL(artThrowClassCastException) // (Class* a, Class* b, Thread*, SP)
+ call SYMBOL(artThrowClassCastException) // (Class* a, Class* b, Thread*, SP)
int3 // unreached
END_FUNCTION art_quick_check_cast
@@ -958,8 +958,8 @@ DEFINE_FUNCTION art_quick_aput_obj_with_null_and_bound_check
#else
testl %edi, %edi
// testq %rdi, %rdi
- jnz art_quick_aput_obj_with_bound_check_local
- jmp art_quick_throw_null_pointer_exception_local
+ jnz art_quick_aput_obj_with_bound_check
+ jmp art_quick_throw_null_pointer_exception
#endif // __APPLE__
END_FUNCTION art_quick_aput_obj_with_null_and_bound_check
@@ -972,12 +972,12 @@ DEFINE_FUNCTION art_quick_aput_obj_with_bound_check
movl ARRAY_LENGTH_OFFSET(%edi), %ecx
// movl ARRAY_LENGTH_OFFSET(%rdi), %ecx // This zero-extends, so value(%rcx)=value(%ecx)
cmpl %ecx, %esi
- jb art_quick_aput_obj_local
+ jb art_quick_aput_obj
mov %esi, %edi
// mov %rsi, %rdi
mov %ecx, %esi
// mov %rcx, %rsi
- jmp art_quick_throw_array_bounds_local
+ jmp art_quick_throw_array_bounds
#endif // __APPLE__
END_FUNCTION art_quick_aput_obj_with_bound_check
@@ -1018,7 +1018,7 @@ DEFINE_FUNCTION art_quick_aput_obj
movl CLASS_OFFSET(%edx), %esi // Pass arg2 = value's class.
movq %rcx, %rdi // Pass arg1 = array's component type.
- call PLT_SYMBOL(artIsAssignableFromCode) // (Class* a, Class* b)
+ call SYMBOL(artIsAssignableFromCode) // (Class* a, Class* b)
// Exception?
testq %rax, %rax
@@ -1057,7 +1057,7 @@ DEFINE_FUNCTION art_quick_aput_obj
movq %gs:THREAD_SELF_OFFSET, %rdx // Pass arg 3 = Thread::Current().
// Pass arg 1 = array.
- call PLT_SYMBOL(artThrowArrayStoreException) // (array, value, Thread*, SP)
+ call SYMBOL(artThrowArrayStoreException) // (array, value, Thread*, SP)
int3 // unreached
END_FUNCTION art_quick_aput_obj
@@ -1099,7 +1099,7 @@ DEFINE_FUNCTION art_quick_set64_static
// field_idx is in rdi
movq %gs:THREAD_SELF_OFFSET, %rcx // pass Thread::Current()
movq %rsp, %r8 // pass SP
- call PLT_SYMBOL(artSet64StaticFromCode) // (field_idx, referrer, new_val, Thread*, SP)
+ call SYMBOL(artSet64StaticFromCode) // (field_idx, referrer, new_val, Thread*, SP)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO // return or deliver exception
END_FUNCTION art_quick_set64_static
@@ -1139,7 +1139,7 @@ DEFINE_FUNCTION art_quick_proxy_invoke_handler
movq %rdi, 0(%rsp)
movq %gs:THREAD_SELF_OFFSET, %rdx // Pass Thread::Current().
movq %rsp, %rcx // Pass SP.
- call PLT_SYMBOL(artQuickProxyInvokeHandler) // (proxy method, receiver, Thread*, SP)
+ call SYMBOL(artQuickProxyInvokeHandler) // (proxy method, receiver, Thread*, SP)
movq %rax, %xmm0 // Copy return value in case of float returns.
addq LITERAL(168 + 4*8), %rsp // Pop arguments.
CFI_ADJUST_CFA_OFFSET(-168 - 4*8)
@@ -1158,7 +1158,7 @@ DEFINE_FUNCTION art_quick_imt_conflict_trampoline
movl 8(%rsp), %edi // load caller Method*
movl METHOD_DEX_CACHE_METHODS_OFFSET(%rdi), %edi // load dex_cache_resolved_methods
movl OBJECT_ARRAY_DATA_OFFSET(%rdi, %rax, 4), %edi // load the target method
- jmp art_quick_invoke_interface_trampoline_local
+ jmp art_quick_invoke_interface_trampoline
#endif // __APPLE__
END_FUNCTION art_quick_imt_conflict_trampoline
@@ -1166,7 +1166,7 @@ DEFINE_FUNCTION art_quick_resolution_trampoline
SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
movq %gs:THREAD_SELF_OFFSET, %rdx
movq %rsp, %rcx
- call PLT_SYMBOL(artQuickResolutionTrampoline) // (called, receiver, Thread*, SP)
+ call SYMBOL(artQuickResolutionTrampoline) // (called, receiver, Thread*, SP)
movq %rax, %r10 // Remember returned code pointer in R10.
movq (%rsp), %rdi // Load called method into RDI.
RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
@@ -1254,7 +1254,7 @@ END_FUNCTION art_quick_resolution_trampoline
/*
* Called to do a generic JNI down-call
*/
-DEFINE_FUNCTION art_quick_generic_jni_trampoline
+DEFINE_FUNCTION_NO_HIDE art_quick_generic_jni_trampoline
// Save callee and GPR args, mixed together to agree with core spills bitmap.
PUSH r15 // Callee save.
PUSH r14 // Callee save.
@@ -1310,7 +1310,7 @@ DEFINE_FUNCTION art_quick_generic_jni_trampoline
// gs:... rbp <= where they are
movq %gs:THREAD_SELF_OFFSET, %rdi
movq %rbp, %rsi
- call PLT_SYMBOL(artQuickGenericJniTrampoline) // (Thread*, sp)
+ call SYMBOL(artQuickGenericJniTrampoline) // (Thread*, sp)
// The C call will have registered the complete save-frame on success.
// The result of the call is:
@@ -1354,7 +1354,7 @@ DEFINE_FUNCTION art_quick_generic_jni_trampoline
movq %gs:THREAD_SELF_OFFSET, %rdi
movq %rax, %rsi
movq %xmm0, %rdx
- call PLT_SYMBOL(artQuickGenericJniEndTrampoline)
+ call SYMBOL(artQuickGenericJniEndTrampoline)
// Tear down the alloca.
movq %rbp, %rsp
@@ -1441,11 +1441,11 @@ END_FUNCTION art_quick_generic_jni_trampoline
* RDI = method being called / to bridge to.
* RSI, RDX, RCX, R8, R9 are arguments to that method.
*/
-DEFINE_FUNCTION art_quick_to_interpreter_bridge
+DEFINE_FUNCTION_NO_HIDE art_quick_to_interpreter_bridge
SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME // Set up frame and save arguments.
movq %gs:THREAD_SELF_OFFSET, %rsi // RSI := Thread::Current()
movq %rsp, %rdx // RDX := sp
- call PLT_SYMBOL(artQuickToInterpreterBridge) // (method, Thread*, SP)
+ call SYMBOL(artQuickToInterpreterBridge) // (method, Thread*, SP)
RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME // TODO: no need to restore arguments in this case.
movq %rax, %xmm0 // Place return value also into floating point return value.
RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception
@@ -1467,12 +1467,12 @@ DEFINE_FUNCTION art_quick_instrumentation_entry
movq %rsp, %rcx // Pass SP.
movq FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE-8(%rsp), %r8 // Pass return PC.
- call PLT_SYMBOL(artInstrumentationMethodEntryFromCode) // (Method*, Object*, Thread*, SP, LR)
+ call SYMBOL(artInstrumentationMethodEntryFromCode) // (Method*, Object*, Thread*, SP, LR)
// %rax = result of call.
movq %r12, %rdi // Reload method pointer.
- leaq art_quick_instrumentation_exit_local(%rip), %r12 // Set up return through instrumentation
+ leaq art_quick_instrumentation_exit(%rip), %r12 // Set up return through instrumentation
movq %r12, FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE-8(%rsp) // exit.
RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
@@ -1501,7 +1501,7 @@ DEFINE_FUNCTION art_quick_instrumentation_exit
movq %rax, %rdx // Pass integer result.
movq %xmm0, %rcx // Pass floating-point result.
- call PLT_SYMBOL(artInstrumentationMethodExitFromCode) // (Thread*, SP, gpr_res, fpr_res)
+ call SYMBOL(artInstrumentationMethodExitFromCode) // (Thread*, SP, gpr_res, fpr_res)
movq %rax, %rdi // Store return PC
movq %rdx, %rsi // Store second return PC in hidden arg.
@@ -1526,7 +1526,7 @@ DEFINE_FUNCTION art_quick_deoptimize
// Stack should be aligned now.
movq %rsp, %rsi // Pass SP.
movq %gs:THREAD_SELF_OFFSET, %rdi // Pass Thread.
- call PLT_SYMBOL(artDeoptimize) // artDeoptimize(Thread*, SP)
+ call SYMBOL(artDeoptimize) // artDeoptimize(Thread*, SP)
int3 // Unreachable.
END_FUNCTION art_quick_deoptimize
@@ -1577,7 +1577,7 @@ UNIMPLEMENTED art_quick_memcmp16
DEFINE_FUNCTION art_quick_assignable_from_code
SETUP_FP_CALLEE_SAVE_FRAME
- call PLT_SYMBOL(artIsAssignableFromCode) // (const mirror::Class*, const mirror::Class*)
+ call SYMBOL(artIsAssignableFromCode) // (const mirror::Class*, const mirror::Class*)
RESTORE_FP_CALLEE_SAVE_FRAME
ret
END_FUNCTION art_quick_assignable_from_code
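The PLT_SYMBOL/PLT_VAR calls above become plain SYMBOL/VAR calls because these entrypoints are now emitted with hidden visibility (DEFINE_FUNCTION_NO_HIDE opts the generic JNI trampoline and the to-interpreter bridge out of that, presumably because other libraries still need to resolve them). A hidden symbol cannot be preempted at load time, so position-independent code may call it directly instead of through the PLT, which also removes the need for the *_local aliases. A minimal C++ sketch of the effect, assuming a GCC/Clang toolchain (the function names are illustrative, not ART's):

    // Hidden visibility: the definition binds locally, so the compiler
    // emits `call artExampleFromCode` rather than `call artExampleFromCode@PLT`.
    extern "C" __attribute__((visibility("hidden")))
    long artExampleFromCode(long v) {  // hypothetical entrypoint
      return v + 1;
    }

    extern "C" long caller(long v) {
      return artExampleFromCode(v);  // direct call, no PLT indirection
    }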
diff --git a/runtime/base/allocator.cc b/runtime/base/allocator.cc
index 4f7753d476..3469eca8c3 100644
--- a/runtime/base/allocator.cc
+++ b/runtime/base/allocator.cc
@@ -23,7 +23,7 @@
namespace art {
-class MallocAllocator : public Allocator {
+class MallocAllocator FINAL : public Allocator {
public:
explicit MallocAllocator() {}
~MallocAllocator() {}
@@ -42,7 +42,7 @@ class MallocAllocator : public Allocator {
MallocAllocator g_malloc_allocator;
-class NoopAllocator : public Allocator {
+class NoopAllocator FINAL : public Allocator {
public:
explicit NoopAllocator() {}
~NoopAllocator() {}
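FINAL expands to the C++11 `final` specifier on toolchains that support it; sealing the two concrete allocators lets the compiler devirtualize calls made through the derived type. A hedged sketch with a simplified Allocator interface:

    #include <cstddef>
    #include <cstdlib>

    struct Allocator {
      virtual void* Alloc(size_t size) = 0;
      virtual ~Allocator() {}
    };

    // `final`: nothing can override Alloc below this class, so a call
    // through a MallocAllocator& may be lowered to a direct call.
    struct MallocAllocator final : Allocator {
      void* Alloc(size_t size) { return malloc(size); }
    };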
diff --git a/runtime/base/bit_vector-inl.h b/runtime/base/bit_vector-inl.h
new file mode 100644
index 0000000000..dc13dd5b9f
--- /dev/null
+++ b/runtime/base/bit_vector-inl.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BASE_BIT_VECTOR_INL_H_
+#define ART_RUNTIME_BASE_BIT_VECTOR_INL_H_
+
+#include "bit_vector.h"
+#include "logging.h"
+#include "utils.h"
+
+namespace art {
+
+inline bool BitVector::IndexIterator::operator==(const IndexIterator& other) const {
+ DCHECK(bit_storage_ == other.bit_storage_);
+ DCHECK_EQ(storage_size_, other.storage_size_);
+ return bit_index_ == other.bit_index_;
+}
+
+inline int BitVector::IndexIterator::operator*() const {
+ DCHECK_LT(bit_index_, BitSize());
+ return bit_index_;
+}
+
+inline BitVector::IndexIterator& BitVector::IndexIterator::operator++() {
+ DCHECK_LT(bit_index_, BitSize());
+ bit_index_ = FindIndex(bit_index_ + 1u);
+ return *this;
+}
+
+inline BitVector::IndexIterator BitVector::IndexIterator::operator++(int) {
+ IndexIterator result(*this);
+ ++*this;
+ return result;
+}
+
+inline uint32_t BitVector::IndexIterator::FindIndex(uint32_t start_index) const {
+ DCHECK_LE(start_index, BitSize());
+ uint32_t word_index = start_index / kWordBits;
+ if (UNLIKELY(word_index == storage_size_)) {
+ return start_index;
+ }
+ uint32_t word = bit_storage_[word_index];
+ // Mask out any bits in the first word we've already considered.
+ word &= static_cast<uint32_t>(-1) << (start_index & 0x1f);
+ while (word == 0u) {
+ ++word_index;
+ if (UNLIKELY(word_index == storage_size_)) {
+ return BitSize();
+ }
+ word = bit_storage_[word_index];
+ }
+ return word_index * kWordBits + CTZ(word);
+}
+
+inline void BitVector::ClearAllBits() {
+ memset(storage_, 0, storage_size_ * kWordBytes);
+}
+
+inline bool BitVector::Equal(const BitVector* src) const {
+ return (storage_size_ == src->GetStorageSize()) &&
+ (expandable_ == src->IsExpandable()) &&
+ (memcmp(storage_, src->GetRawStorage(), storage_size_ * sizeof(uint32_t)) == 0);
+}
+
+} // namespace art
+
+#endif // ART_RUNTIME_BASE_BIT_VECTOR_INL_H_
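FindIndex() is the core of the iterator: mask off the already-visited bits of the first word, skip whole zero words, then use CTZ (count trailing zeros) on the first non-zero word. A self-contained sketch of the same scan, assuming CTZ(x) is __builtin_ctz(x):

    #include <cstdint>

    // First set bit at or after `start`; returns num_words * 32 (the
    // iterator's "end" value) when no set bit remains.
    uint32_t FindFirstSetFrom(const uint32_t* words, uint32_t num_words,
                              uint32_t start) {
      uint32_t word_index = start / 32;
      if (word_index == num_words) {
        return start;  // already at the end sentinel
      }
      uint32_t word = words[word_index] & (~0u << (start & 0x1f));
      while (word == 0u) {
        if (++word_index == num_words) {
          return num_words * 32u;  // no set bit left
        }
        word = words[word_index];
      }
      return word_index * 32u + __builtin_ctz(word);
    }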
diff --git a/runtime/base/bit_vector.cc b/runtime/base/bit_vector.cc
index 1b9022e170..3d2f0deac5 100644
--- a/runtime/base/bit_vector.cc
+++ b/runtime/base/bit_vector.cc
@@ -16,20 +16,14 @@
#include "bit_vector.h"
+#include "allocator.h"
+#include "bit_vector-inl.h"
+
namespace art {
-// TODO: profile to make sure this is still a win relative to just using shifted masks.
-static uint32_t check_masks[32] = {
- 0x00000001, 0x00000002, 0x00000004, 0x00000008, 0x00000010,
- 0x00000020, 0x00000040, 0x00000080, 0x00000100, 0x00000200,
- 0x00000400, 0x00000800, 0x00001000, 0x00002000, 0x00004000,
- 0x00008000, 0x00010000, 0x00020000, 0x00040000, 0x00080000,
- 0x00100000, 0x00200000, 0x00400000, 0x00800000, 0x01000000,
- 0x02000000, 0x04000000, 0x08000000, 0x10000000, 0x20000000,
- 0x40000000, 0x80000000 };
-
-static inline uint32_t BitsToWords(uint32_t bits) {
- return (bits + 31) >> 5;
+// The number of 32-bit words necessary to hold the given number of bits.
+static constexpr uint32_t BitsToWords(uint32_t bits) {
+ return RoundUp(bits, 32) / 32;
}
// TODO: replace excessive argument defaulting when we are at gcc 4.7
@@ -40,10 +34,10 @@ BitVector::BitVector(uint32_t start_bits,
Allocator* allocator,
uint32_t storage_size,
uint32_t* storage)
- : allocator_(allocator),
- expandable_(expandable),
+ : storage_(storage),
storage_size_(storage_size),
- storage_(storage) {
+ allocator_(allocator),
+ expandable_(expandable) {
COMPILE_ASSERT(sizeof(*storage_) == kWordBytes, check_word_bytes);
COMPILE_ASSERT(sizeof(*storage_) * 8u == kWordBits, check_word_bits);
if (storage_ == nullptr) {
@@ -56,59 +50,7 @@ BitVector::~BitVector() {
allocator_->Free(storage_);
}
-/*
- * Determine whether or not the specified bit is set.
- */
-bool BitVector::IsBitSet(uint32_t num) const {
- // If the index is over the size:
- if (num >= storage_size_ * kWordBits) {
- // Whether it is expandable or not, this bit does not exist: thus it is not set.
- return false;
- }
-
- return IsBitSet(storage_, num);
-}
-
-// Mark all bits bit as "clear".
-void BitVector::ClearAllBits() {
- memset(storage_, 0, storage_size_ * kWordBytes);
-}
-
-// Mark the specified bit as "set".
-/*
- * TUNING: this could have pathologically bad growth/expand behavior. Make sure we're
- * not using it badly or change resize mechanism.
- */
-void BitVector::SetBit(uint32_t num) {
- if (num >= storage_size_ * kWordBits) {
- DCHECK(expandable_) << "Attempted to expand a non-expandable bitmap to position " << num;
-
- /* Round up to word boundaries for "num+1" bits */
- uint32_t new_size = BitsToWords(num + 1);
- DCHECK_GT(new_size, storage_size_);
- uint32_t *new_storage =
- static_cast<uint32_t*>(allocator_->Alloc(new_size * kWordBytes));
- memcpy(new_storage, storage_, storage_size_ * kWordBytes);
- // Zero out the new storage words.
- memset(&new_storage[storage_size_], 0, (new_size - storage_size_) * kWordBytes);
- // TOTO: collect stats on space wasted because of resize.
- storage_ = new_storage;
- storage_size_ = new_size;
- }
-
- storage_[num >> 5] |= check_masks[num & 0x1f];
-}
-
-// Mark the specified bit as "unset".
-void BitVector::ClearBit(uint32_t num) {
- // If the index is over the size, we don't have to do anything, it is cleared.
- if (num < storage_size_ * kWordBits) {
- // Otherwise, go ahead and clear it.
- storage_[num >> 5] &= ~check_masks[num & 0x1f];
- }
-}
-
-bool BitVector::SameBitsSet(const BitVector *src) {
+bool BitVector::SameBitsSet(const BitVector *src) const {
int our_highest = GetHighestBitSet();
int src_highest = src->GetHighestBitSet();
@@ -134,7 +76,6 @@ bool BitVector::SameBitsSet(const BitVector *src) {
return (memcmp(storage_, src->GetRawStorage(), our_highest_index * kWordBytes) == 0);
}
-// Intersect with another bit vector.
void BitVector::Intersect(const BitVector* src) {
uint32_t src_storage_size = src->storage_size_;
@@ -155,9 +96,6 @@ void BitVector::Intersect(const BitVector* src) {
}
}
-/*
- * Union with another bit vector.
- */
bool BitVector::Union(const BitVector* src) {
// Get the highest bit to determine how much we need to expand.
int highest_bit = src->GetHighestBitSet();
@@ -175,8 +113,7 @@ bool BitVector::Union(const BitVector* src) {
if (storage_size_ < src_size) {
changed = true;
- // Set it to reallocate.
- SetBit(highest_bit);
+ EnsureSize(highest_bit);
// Paranoid: storage size should be big enough to hold this bit now.
DCHECK_LT(static_cast<uint32_t> (highest_bit), storage_size_ * kWordBits);
@@ -242,21 +179,20 @@ bool BitVector::UnionIfNotIn(const BitVector* union_with, const BitVector* not_i
}
void BitVector::Subtract(const BitVector *src) {
- uint32_t src_size = src->storage_size_;
+ uint32_t src_size = src->storage_size_;
- // We only need to operate on bytes up to the smaller of the sizes of the two operands.
- unsigned int min_size = (storage_size_ > src_size) ? src_size : storage_size_;
+ // We only need to operate on words up to the smaller of the sizes of the two operands.
+ unsigned int min_size = (storage_size_ > src_size) ? src_size : storage_size_;
- // Difference until max, we know both accept it:
- // There is no need to do more:
- // If we are bigger than src, the upper bits are unchanged.
- // If we are smaller than src, the non-existant upper bits are 0 and thus can't get subtracted.
- for (uint32_t idx = 0; idx < min_size; idx++) {
- storage_[idx] &= (~(src->GetRawStorageWord(idx)));
- }
+ // Difference until max, we know both accept it:
+ // There is no need to do more:
+ // If we are bigger than src, the upper bits are unchanged.
+ // If we are smaller than src, the non-existent upper bits are 0 and thus can't get subtracted.
+ for (uint32_t idx = 0; idx < min_size; idx++) {
+ storage_[idx] &= (~(src->GetRawStorageWord(idx)));
+ }
}
-// Count the number of bits that are set.
uint32_t BitVector::NumSetBits() const {
uint32_t count = 0;
for (uint32_t word = 0; word < storage_size_; word++) {
@@ -265,17 +201,11 @@ uint32_t BitVector::NumSetBits() const {
return count;
}
-// Count the number of bits that are set in range [0, end).
uint32_t BitVector::NumSetBits(uint32_t end) const {
DCHECK_LE(end, storage_size_ * kWordBits);
return NumSetBits(storage_, end);
}
-/*
- * Mark specified number of bits as "set". Cannot set all bits like ClearAll
- * since there might be unused bits - setting those to one will confuse the
- * iterator.
- */
void BitVector::SetInitialBits(uint32_t num_bits) {
// If num_bits is 0, clear everything.
if (num_bits == 0) {
@@ -288,7 +218,7 @@ void BitVector::SetInitialBits(uint32_t num_bits) {
uint32_t idx;
// We can set every storage element with -1.
- for (idx = 0; idx < (num_bits >> 5); idx++) {
+ for (idx = 0; idx < WordIndex(num_bits); idx++) {
storage_[idx] = -1;
}
@@ -312,20 +242,8 @@ int BitVector::GetHighestBitSet() const {
uint32_t value = storage_[idx];
if (value != 0) {
- // Shift right for the counting.
- value /= 2;
-
- int cnt = 0;
-
- // Count the bits.
- while (value > 0) {
- value /= 2;
- cnt++;
- }
-
- // Return cnt + how many storage units still remain * the number of bits per unit.
- int res = cnt + (idx * kWordBits);
- return res;
+ // Return highest bit set in value plus bits from previous storage indexes.
+ return 31 - CLZ(value) + (idx * kWordBits);
}
}
@@ -333,23 +251,6 @@ int BitVector::GetHighestBitSet() const {
return -1;
}
-bool BitVector::EnsureSizeAndClear(unsigned int num) {
- // Check if the bitvector is expandable.
- if (IsExpandable() == false) {
- return false;
- }
-
- if (num > 0) {
- // Now try to expand by setting the last bit.
- SetBit(num - 1);
- }
-
- // We must clear all bits as per our specification.
- ClearAllBits();
-
- return true;
-}
-
void BitVector::Copy(const BitVector *src) {
// Get highest bit set, we only need to copy till then.
int highest_bit = src->GetHighestBitSet();
@@ -375,13 +276,8 @@ void BitVector::Copy(const BitVector *src) {
}
}
-bool BitVector::IsBitSet(const uint32_t* storage, uint32_t num) {
- uint32_t val = storage[num >> 5] & check_masks[num & 0x1f];
- return (val != 0);
-}
-
uint32_t BitVector::NumSetBits(const uint32_t* storage, uint32_t end) {
- uint32_t word_end = end >> 5;
+ uint32_t word_end = WordIndex(end);
uint32_t partial_word_bits = end & 0x1f;
uint32_t count = 0u;
@@ -400,45 +296,6 @@ void BitVector::Dump(std::ostream& os, const char *prefix) const {
os << buffer.str() << std::endl;
}
-
-void BitVector::DumpDotHelper(bool last_entry, FILE* file, std::ostringstream& buffer) const {
- // Now print it to the file.
- fprintf(file, " {%s}", buffer.str().c_str());
-
- // If it isn't the last entry, add a |.
- if (last_entry == false) {
- fprintf(file, "|");
- }
-
- // Add the \n.
- fprintf(file, "\\\n");
-}
-
-void BitVector::DumpDot(FILE* file, const char* prefix, bool last_entry) const {
- std::ostringstream buffer;
- DumpHelper(prefix, buffer);
- DumpDotHelper(last_entry, file, buffer);
-}
-
-void BitVector::DumpIndicesDot(FILE* file, const char* prefix, bool last_entry) const {
- std::ostringstream buffer;
- DumpIndicesHelper(prefix, buffer);
- DumpDotHelper(last_entry, file, buffer);
-}
-
-void BitVector::DumpIndicesHelper(const char* prefix, std::ostringstream& buffer) const {
- // Initialize it.
- if (prefix != nullptr) {
- buffer << prefix;
- }
-
- for (size_t i = 0; i < storage_size_ * kWordBits; i++) {
- if (IsBitSet(i)) {
- buffer << i << " ";
- }
- }
-}
-
void BitVector::DumpHelper(const char* prefix, std::ostringstream& buffer) const {
// Initialize it.
if (prefix != nullptr) {
@@ -452,4 +309,22 @@ void BitVector::DumpHelper(const char* prefix, std::ostringstream& buffer) const
buffer << ')';
}
+void BitVector::EnsureSize(uint32_t idx) {
+ if (idx >= storage_size_ * kWordBits) {
+ DCHECK(expandable_) << "Attempted to expand a non-expandable bitmap to position " << idx;
+
+ /* Round up to word boundaries for "idx+1" bits */
+ uint32_t new_size = BitsToWords(idx + 1);
+ DCHECK_GT(new_size, storage_size_);
+ uint32_t *new_storage =
+ static_cast<uint32_t*>(allocator_->Alloc(new_size * kWordBytes));
+ memcpy(new_storage, storage_, storage_size_ * kWordBytes);
+ // Zero out the new storage words.
+ memset(&new_storage[storage_size_], 0, (new_size - storage_size_) * kWordBytes);
+ // TODO: collect stats on space wasted because of resize.
+ storage_ = new_storage;
+ storage_size_ = new_size;
+ }
+}
+
} // namespace art
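GetHighestBitSet() now replaces the shift-and-count loop with CLZ (count leading zeros): for a non-zero 32-bit word, 31 - CLZ(value) is the index of the highest set bit. A quick sketch, assuming CLZ(x) is __builtin_clz(x):

    #include <cassert>
    #include <cstdint>

    // Index of the highest set bit of a non-zero word.
    int HighestBit(uint32_t value) {
      assert(value != 0u);  // CLZ is undefined for zero input
      return 31 - __builtin_clz(value);
    }
    // e.g. HighestBit(0x90u) == 7, matching the old loop's result.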
diff --git a/runtime/base/bit_vector.h b/runtime/base/bit_vector.h
index fb1646f7fc..1e28a27e94 100644
--- a/runtime/base/bit_vector.h
+++ b/runtime/base/bit_vector.h
@@ -18,235 +18,237 @@
#define ART_RUNTIME_BASE_BIT_VECTOR_H_
#include <stdint.h>
-#include <stddef.h>
-
-#include "allocator.h"
-#include "base/logging.h"
-#include "utils.h"
+#include <iterator>
namespace art {
+class Allocator;
+
/*
* Expanding bitmap, used for tracking resources. Bits are numbered starting
* from zero. All operations on a BitVector are unsynchronized.
*/
class BitVector {
- public:
- class IndexContainer;
-
- /**
- * @brief Convenient iterator across the indexes of the BitVector's set bits.
- *
- * @details IndexIterator is a Forward iterator (C++11: 24.2.5) from the lowest
- * to the highest index of the BitVector's set bits. Instances can be retrieved
- * only through BitVector::Indexes() which returns an IndexContainer wrapper
- * object with begin() and end() suitable for range-based loops:
- * for (uint32_t idx : bit_vector.Indexes()) {
- * // Use idx.
- * }
- */
- class IndexIterator
- : std::iterator<std::forward_iterator_tag, uint32_t, ptrdiff_t, void, uint32_t> {
- public:
- bool operator==(const IndexIterator& other) const {
- DCHECK(bit_storage_ == other.bit_storage_);
- DCHECK_EQ(storage_size_, other.storage_size_);
- return bit_index_ == other.bit_index_;
- }
-
- bool operator!=(const IndexIterator& other) const {
- return !(*this == other);
- }
-
- int operator*() const {
- DCHECK_LT(bit_index_, BitSize());
- return bit_index_;
- }
-
- IndexIterator& operator++() {
- DCHECK_LT(bit_index_, BitSize());
- bit_index_ = FindIndex(bit_index_ + 1u);
- return *this;
- }
-
- IndexIterator operator++(int) {
- IndexIterator result(*this);
- ++*this;
- return result;
- }
-
- // Helper function to check for end without comparing with bit_vector.Indexes().end().
- bool Done() const {
- return bit_index_ == BitSize();
- }
-
- private:
- struct begin_tag { };
- struct end_tag { };
-
- IndexIterator(const BitVector* bit_vector, begin_tag)
- : bit_storage_(bit_vector->GetRawStorage()),
- storage_size_(bit_vector->storage_size_),
- bit_index_(FindIndex(0u)) { }
-
- IndexIterator(const BitVector* bit_vector, end_tag)
- : bit_storage_(bit_vector->GetRawStorage()),
- storage_size_(bit_vector->storage_size_),
- bit_index_(BitSize()) { }
-
- uint32_t BitSize() const {
- return storage_size_ * kWordBits;
- }
-
- uint32_t FindIndex(uint32_t start_index) const {
- DCHECK_LE(start_index, BitSize());
- uint32_t word_index = start_index / kWordBits;
- if (UNLIKELY(word_index == storage_size_)) {
- return start_index;
- }
- uint32_t word = bit_storage_[word_index];
- // Mask out any bits in the first word we've already considered.
- word &= static_cast<uint32_t>(-1) << (start_index & 0x1f);
- while (word == 0u) {
- ++word_index;
- if (UNLIKELY(word_index == storage_size_)) {
- return BitSize();
- }
- word = bit_storage_[word_index];
- }
- return word_index * 32u + CTZ(word);
- }
-
- const uint32_t* const bit_storage_;
- const uint32_t storage_size_; // Size of vector in words.
- uint32_t bit_index_; // Current index (size in bits).
-
- friend class BitVector::IndexContainer;
- };
-
- /**
- * @brief BitVector wrapper class for iteration across indexes of set bits.
- */
- class IndexContainer {
- public:
- explicit IndexContainer(const BitVector* bit_vector) : bit_vector_(bit_vector) { }
-
- IndexIterator begin() const {
- return IndexIterator(bit_vector_, IndexIterator::begin_tag());
- }
-
- IndexIterator end() const {
- return IndexIterator(bit_vector_, IndexIterator::end_tag());
- }
-
- private:
- const BitVector* const bit_vector_;
- };
-
- BitVector(uint32_t start_bits,
- bool expandable,
- Allocator* allocator,
- uint32_t storage_size = 0,
- uint32_t* storage = nullptr);
-
- virtual ~BitVector();
-
- void SetBit(uint32_t num);
- void ClearBit(uint32_t num);
- bool IsBitSet(uint32_t num) const;
- void ClearAllBits();
- void SetInitialBits(uint32_t num_bits);
-
- void Copy(const BitVector* src);
- void Intersect(const BitVector* src2);
- bool Union(const BitVector* src);
-
- // Set bits of union_with that are not in not_in.
- bool UnionIfNotIn(const BitVector* union_with, const BitVector* not_in);
-
- void Subtract(const BitVector* src);
- // Are we equal to another bit vector? Note: expandability attributes must also match.
- bool Equal(const BitVector* src) {
- return (storage_size_ == src->GetStorageSize()) &&
- (expandable_ == src->IsExpandable()) &&
- (memcmp(storage_, src->GetRawStorage(), storage_size_ * sizeof(uint32_t)) == 0);
+ public:
+ class IndexContainer;
+
+ /**
+ * @brief Convenient iterator across the indexes of the BitVector's set bits.
+ *
+ * @details IndexIterator is a Forward iterator (C++11: 24.2.5) from the lowest
+ * to the highest index of the BitVector's set bits. Instances can be retrieved
+ * only through BitVector::Indexes() which returns an IndexContainer wrapper
+ * object with begin() and end() suitable for range-based loops:
+ * for (uint32_t idx : bit_vector.Indexes()) {
+ * // Use idx.
+ * }
+ */
+ class IndexIterator :
+ std::iterator<std::forward_iterator_tag, uint32_t, ptrdiff_t, void, uint32_t> {
+ public:
+ bool operator==(const IndexIterator& other) const;
+
+ bool operator!=(const IndexIterator& other) const {
+ return !(*this == other);
}
- /**
- * @brief Are all the bits set the same?
- * @details expandability and size can differ as long as the same bits are set.
- */
- bool SameBitsSet(const BitVector *src);
+ int operator*() const;
- uint32_t NumSetBits() const;
+ IndexIterator& operator++();
- // Number of bits set in range [0, end).
- uint32_t NumSetBits(uint32_t end) const;
+ IndexIterator operator++(int);
- IndexContainer Indexes() const {
- return IndexContainer(this);
+ // Helper function to check for end without comparing with bit_vector.Indexes().end().
+ bool Done() const {
+ return bit_index_ == BitSize();
}
- uint32_t GetStorageSize() const { return storage_size_; }
- bool IsExpandable() const { return expandable_; }
- uint32_t GetRawStorageWord(size_t idx) const { return storage_[idx]; }
- uint32_t* GetRawStorage() { return storage_; }
- const uint32_t* GetRawStorage() const { return storage_; }
- size_t GetSizeOf() const { return storage_size_ * kWordBytes; }
+ private:
+ struct begin_tag { };
+ struct end_tag { };
- /**
- * @return the highest bit set, -1 if none are set
- */
- int GetHighestBitSet() const;
+ IndexIterator(const BitVector* bit_vector, begin_tag)
+ : bit_storage_(bit_vector->GetRawStorage()),
+ storage_size_(bit_vector->storage_size_),
+ bit_index_(FindIndex(0u)) { }
- // Is bit set in storage. (No range check.)
- static bool IsBitSet(const uint32_t* storage, uint32_t num);
- // Number of bits set in range [0, end) in storage. (No range check.)
- static uint32_t NumSetBits(const uint32_t* storage, uint32_t end);
+ IndexIterator(const BitVector* bit_vector, end_tag)
+ : bit_storage_(bit_vector->GetRawStorage()),
+ storage_size_(bit_vector->storage_size_),
+ bit_index_(BitSize()) { }
- bool EnsureSizeAndClear(unsigned int num);
+ uint32_t BitSize() const {
+ return storage_size_ * kWordBits;
+ }
- void Dump(std::ostream& os, const char* prefix) const;
+ uint32_t FindIndex(uint32_t start_index) const;
+ const uint32_t* const bit_storage_;
+ const uint32_t storage_size_; // Size of vector in words.
+ uint32_t bit_index_; // Current index (size in bits).
- /**
- * @brief last_entry is this the last entry for the dot dumping
- * @details if not, a "|" is appended to the dump.
- */
- void DumpDot(FILE* file, const char* prefix, bool last_entry = false) const;
+ friend class BitVector::IndexContainer;
+ };
- /**
- * @brief last_entry is this the last entry for the dot dumping
- * @details if not, a "|" is appended to the dump.
- */
- void DumpIndicesDot(FILE* file, const char* prefix, bool last_entry = false) const;
+ /**
+ * @brief BitVector wrapper class for iteration across indexes of set bits.
+ */
+ class IndexContainer {
+ public:
+ explicit IndexContainer(const BitVector* bit_vector) : bit_vector_(bit_vector) { }
- protected:
- /**
- * @brief Dump the bitvector into buffer in a 00101..01 format.
- * @param buffer the ostringstream used to dump the bitvector into.
- */
- void DumpHelper(const char* prefix, std::ostringstream& buffer) const;
+ IndexIterator begin() const {
+ return IndexIterator(bit_vector_, IndexIterator::begin_tag());
+ }
- /**
- * @brief Dump the bitvector in a 1 2 5 8 format, where the numbers are the bit set.
- * @param buffer the ostringstream used to dump the bitvector into.
- */
- void DumpIndicesHelper(const char* prefix, std::ostringstream& buffer) const;
+ IndexIterator end() const {
+ return IndexIterator(bit_vector_, IndexIterator::end_tag());
+ }
+
+ private:
+ const BitVector* const bit_vector_;
+ };
+
+ BitVector(uint32_t start_bits,
+ bool expandable,
+ Allocator* allocator,
+ uint32_t storage_size = 0,
+ uint32_t* storage = nullptr);
- /**
- * @brief Wrapper to perform the bitvector dumping with the .dot format.
- * @param buffer the ostringstream used to dump the bitvector into.
+ virtual ~BitVector();
+
+ // Mark the specified bit as "set".
+ void SetBit(uint32_t idx) {
+ /*
+ * TUNING: this could have pathologically bad growth/expand behavior. Make sure we're
+ * not using it badly or change resize mechanism.
*/
- void DumpDotHelper(bool last_entry, FILE* file, std::ostringstream& buffer) const;
+ if (idx >= storage_size_ * kWordBits) {
+ EnsureSize(idx);
+ }
+ storage_[WordIndex(idx)] |= BitMask(idx);
+ }
+
+ // Mark the specified bit as "unset".
+ void ClearBit(uint32_t idx) {
+ // If the index is over the size, we don't have to do anything, it is cleared.
+ if (idx < storage_size_ * kWordBits) {
+ // Otherwise, go ahead and clear it.
+ storage_[WordIndex(idx)] &= ~BitMask(idx);
+ }
+ }
+
+ // Determine whether or not the specified bit is set.
+ bool IsBitSet(uint32_t idx) const {
+ // If the index is over the size, whether it is expandable or not, this bit does not exist:
+ // thus it is not set.
+ return (idx < (storage_size_ * kWordBits)) && IsBitSet(storage_, idx);
+ }
+
+ // Mark all bits as "clear".
+ void ClearAllBits();
+
+ // Mark specified number of bits as "set". Cannot set all bits like ClearAll since there might
+ // be unused bits - setting those to one will confuse the iterator.
+ void SetInitialBits(uint32_t num_bits);
+
+ void Copy(const BitVector* src);
+
+ // Intersect with another bit vector.
+ void Intersect(const BitVector* src2);
+
+ // Union with another bit vector.
+ bool Union(const BitVector* src);
+
+ // Set bits of union_with that are not in not_in.
+ bool UnionIfNotIn(const BitVector* union_with, const BitVector* not_in);
+
+ void Subtract(const BitVector* src);
+
+ // Are we equal to another bit vector? Note: expandability attributes must also match.
+ bool Equal(const BitVector* src) const;
+
+ /**
+ * @brief Are all the bits set the same?
+ * @details expandability and size can differ as long as the same bits are set.
+ */
+ bool SameBitsSet(const BitVector *src) const;
+
+ // Count the number of bits that are set.
+ uint32_t NumSetBits() const;
+
+ // Count the number of bits that are set in range [0, end).
+ uint32_t NumSetBits(uint32_t end) const;
+
+ IndexContainer Indexes() const {
+ return IndexContainer(this);
+ }
+
+ uint32_t GetStorageSize() const {
+ return storage_size_;
+ }
+
+ bool IsExpandable() const {
+ return expandable_;
+ }
+
+ uint32_t GetRawStorageWord(size_t idx) const {
+ return storage_[idx];
+ }
+
+ uint32_t* GetRawStorage() {
+ return storage_;
+ }
+
+ const uint32_t* GetRawStorage() const {
+ return storage_;
+ }
+
+ size_t GetSizeOf() const {
+ return storage_size_ * kWordBytes;
+ }
+
+ /**
+ * @return the highest bit set, -1 if none are set
+ */
+ int GetHighestBitSet() const;
+
+ // Is bit set in storage. (No range check.)
+ static bool IsBitSet(const uint32_t* storage, uint32_t idx) {
+ return (storage[WordIndex(idx)] & BitMask(idx)) != 0;
+ }
+
+ // Number of bits set in range [0, end) in storage. (No range check.)
+ static uint32_t NumSetBits(const uint32_t* storage, uint32_t end);
+
+ void Dump(std::ostream& os, const char* prefix) const;
+
+ private:
+ /**
+ * @brief Dump the bitvector into buffer in a 00101..01 format.
+ * @param buffer the ostringstream used to dump the bitvector into.
+ */
+ void DumpHelper(const char* prefix, std::ostringstream& buffer) const;
+
+ // Ensure there is space for a bit at idx.
+ void EnsureSize(uint32_t idx);
+
+ // The index of the word within storage.
+ static constexpr uint32_t WordIndex(uint32_t idx) {
+ return idx >> 5;
+ }
+
+ // A bit mask to extract the bit for the given index.
+ static constexpr uint32_t BitMask(uint32_t idx) {
+ return 1 << (idx & 0x1f);
+ }
- private:
- static constexpr uint32_t kWordBytes = sizeof(uint32_t);
- static constexpr uint32_t kWordBits = kWordBytes * 8;
+ static constexpr uint32_t kWordBytes = sizeof(uint32_t);
+ static constexpr uint32_t kWordBits = kWordBytes * 8;
- Allocator* const allocator_;
- const bool expandable_; // expand bitmap if we run out?
- uint32_t storage_size_; // current size, in 32-bit words.
- uint32_t* storage_;
+ uint32_t* storage_; // The storage for the bit vector.
+ uint32_t storage_size_; // Current size, in 32-bit words.
+ Allocator* const allocator_; // Allocator if expandable.
+ const bool expandable_; // Should the bitmap expand if too small?
};
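Typical use of the slimmed-down header: callers that need the inline definitions (the IndexIterator operators, ClearAllBits(), Equal()) now include bit_vector-inl.h, as the updated test below does. A hedged usage sketch (the allocator accessor name is assumed from runtime/base/allocator.h):

    #include "allocator.h"
    #include "bit_vector-inl.h"

    void Example() {
      art::BitVector live(64, true /* expandable */,
                          art::Allocator::GetMallocAllocator());
      live.SetBit(3);
      live.SetBit(40);
      for (uint32_t idx : live.Indexes()) {
        // Visits 3, then 40: set-bit indexes in ascending order.
      }
    }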
diff --git a/runtime/base/bit_vector_test.cc b/runtime/base/bit_vector_test.cc
index 1403f50c04..df5d79d893 100644
--- a/runtime/base/bit_vector_test.cc
+++ b/runtime/base/bit_vector_test.cc
@@ -16,7 +16,8 @@
#include <memory>
-#include "bit_vector.h"
+#include "allocator.h"
+#include "bit_vector-inl.h"
#include "gtest/gtest.h"
namespace art {
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index 99277a0629..b0f8e22ab2 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -228,7 +228,10 @@ class ScopedCheck {
}
if (invoke != kStatic) {
mirror::Object* o = soa.Decode<mirror::Object*>(jobj);
- if (!o->InstanceOf(m->GetDeclaringClass())) {
+ if (o == nullptr) {
+ AbortF("can't call %s on null object", PrettyMethod(m).c_str());
+ return false;
+ } else if (!o->InstanceOf(m->GetDeclaringClass())) {
AbortF("can't call %s on instance of %s", PrettyMethod(m).c_str(), PrettyTypeOf(o).c_str());
return false;
}
@@ -292,7 +295,10 @@ class ScopedCheck {
return false;
}
mirror::Object* o = soa.Decode<mirror::Object*>(java_object);
- if (!o->InstanceOf(m->GetDeclaringClass())) {
+ if (o == nullptr) {
+ AbortF("can't call %s on null object", PrettyMethod(m).c_str());
+ return false;
+ } else if (!o->InstanceOf(m->GetDeclaringClass())) {
AbortF("can't call %s on instance of %s", PrettyMethod(m).c_str(), PrettyTypeOf(o).c_str());
return false;
}
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index db42146ffe..8fc1d072d2 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -851,7 +851,8 @@ bool ClassLinker::OpenDexFilesFromOat(const char* dex_location, const char* oat_
// We opened the oat file, so we must register it.
RegisterOatFile(oat_file);
}
- return true;
+ // If the file isn't executable, patchoat failed but we still managed to get the dex files.
+ return oat_file->IsExecutable();
} else {
if (needs_registering) {
// We opened it, delete it.
@@ -1136,11 +1137,18 @@ const OatFile* ClassLinker::FindOatFileContainingDexFileFromDexLocation(
error_msgs->push_back(StringPrintf("Failed to open oat file from dex location '%s'",
dex_location));
return nullptr;
- } else if (!VerifyOatWithDexFile(oat_file.get(), dex_location, &error_msg)) {
+ } else if (oat_file->IsExecutable() &&
+ !VerifyOatWithDexFile(oat_file.get(), dex_location, &error_msg)) {
error_msgs->push_back(StringPrintf("Failed to verify oat file '%s' found for dex location "
"'%s': %s", oat_file->GetLocation().c_str(), dex_location,
error_msg.c_str()));
return nullptr;
+ } else if (!oat_file->IsExecutable() &&
+ !VerifyOatImageChecksum(oat_file.get(), isa)) {
+ error_msgs->push_back(StringPrintf("Failed to verify non-executable oat file '%s' found for "
+ "dex location '%s'. Image checksum incorrect.",
+ oat_file->GetLocation().c_str(), dex_location));
+ return nullptr;
} else {
return oat_file.release();
}
@@ -1310,11 +1318,35 @@ const OatFile* ClassLinker::OpenOatFileFromDexLocation(const std::string& dex_lo
return ret;
}
+const OatFile* ClassLinker::GetInterpretedOnlyOat(const std::string& oat_path,
+ InstructionSet isa,
+ std::string* error_msg) {
+ // We open it non-executable, so it can only be used for interpretation.
+ std::unique_ptr<OatFile> output(OatFile::Open(oat_path, oat_path, nullptr, false, error_msg));
+ if (output.get() == nullptr) {
+ return nullptr;
+ }
+ if (VerifyOatImageChecksum(output.get(), isa)) {
+ return output.release();
+ } else {
+ *error_msg = StringPrintf("Could not use oat file '%s', image checksum failed to verify.",
+ oat_path.c_str());
+ return nullptr;
+ }
+}
+
const OatFile* ClassLinker::PatchAndRetrieveOat(const std::string& input_oat,
const std::string& output_oat,
const std::string& image_location,
InstructionSet isa,
std::string* error_msg) {
+ if (!Runtime::Current()->IsDex2OatEnabled()) {
+ // We don't have dex2oat, so we can assume we don't have patchoat either. Just use the
+ // input_oat, but make sure we only do interpretation on its dex files.
+ LOG(WARNING) << "Patching of oat file '" << input_oat << "' not attempted due to dex2oat being "
+ << "disabled. Attempting to use oat file for interpretation";
+ return GetInterpretedOnlyOat(input_oat, isa, error_msg);
+ }
Locks::mutator_lock_->AssertNotHeld(Thread::Current()); // Avoid starving GC.
std::string patchoat(Runtime::Current()->GetPatchoatExecutable());
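Both new early-outs in PatchAndRetrieveOat implement the same fallback: when patching is impossible (dex2oat disabled) or fails outside the compiler (e.g. no room for the output file), the input oat is reopened non-executable and, if its image checksum still verifies, its dex files are used for interpretation only. A condensed sketch of the decision, hedged: names match the surrounding diff, but this is not the actual method body:

    if (!Runtime::Current()->IsDex2OatEnabled()) {
      // No dex2oat implies no patchoat: interpret-only fallback.
      return GetInterpretedOnlyOat(input_oat, isa, error_msg);
    }
    // ... run patchoat; if it fails and we are not the compiler, fall
    // back to GetInterpretedOnlyOat(input_oat, isa, error_msg) as well.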
@@ -1352,6 +1384,12 @@ const OatFile* ClassLinker::PatchAndRetrieveOat(const std::string& input_oat,
"but was unable to open output file '%s': %s",
input_oat.c_str(), output_oat.c_str(), error_msg->c_str());
}
+ } else if (!Runtime::Current()->IsCompiler()) {
+ // patchoat failed which means we probably don't have enough room to place the output oat file,
+ // instead of failing we should just run the interpreter from the dex files in the input oat.
+ LOG(WARNING) << "Patching of oat file '" << input_oat << "' failed. Attempting to use oat file "
+ << "for interpretation. patchoat failure was: " << *error_msg;
+ return GetInterpretedOnlyOat(input_oat, isa, error_msg);
} else {
*error_msg = StringPrintf("Patching of oat file '%s to '%s' "
"failed: %s", input_oat.c_str(), output_oat.c_str(),
@@ -2022,21 +2060,20 @@ uint32_t ClassLinker::SizeOfClassWithoutEmbeddedTables(const DexFile& dex_file,
return mirror::Class::ComputeClassSize(false, 0, num_32, num_64, num_ref);
}
-bool ClassLinker::FindOatClass(const DexFile& dex_file,
- uint16_t class_def_idx,
- OatFile::OatClass* oat_class) {
- DCHECK(oat_class != nullptr);
+OatFile::OatClass ClassLinker::FindOatClass(const DexFile& dex_file, uint16_t class_def_idx,
+ bool* found) {
DCHECK_NE(class_def_idx, DexFile::kDexNoIndex16);
const OatFile* oat_file = FindOpenedOatFileForDexFile(dex_file);
if (oat_file == nullptr) {
- return false;
+ *found = false;
+ return OatFile::OatClass::Invalid();
}
uint dex_location_checksum = dex_file.GetLocationChecksum();
const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_file.GetLocation().c_str(),
&dex_location_checksum);
CHECK(oat_dex_file != NULL) << dex_file.GetLocation();
- *oat_class = oat_dex_file->GetOatClass(class_def_idx);
- return true;
+ *found = true;
+ return oat_dex_file->GetOatClass(class_def_idx);
}
static uint32_t GetOatMethodIndexFromMethodIndex(const DexFile& dex_file, uint16_t class_def_idx,
@@ -2073,8 +2110,7 @@ static uint32_t GetOatMethodIndexFromMethodIndex(const DexFile& dex_file, uint16
return 0;
}
-bool ClassLinker::FindOatMethodFor(mirror::ArtMethod* method, OatFile::OatMethod* oat_method) {
- DCHECK(oat_method != nullptr);
+const OatFile::OatMethod ClassLinker::FindOatMethodFor(mirror::ArtMethod* method, bool* found) {
// Although we overwrite the trampoline of non-static methods, we may get here via the resolution
// method for direct methods (or virtual methods made direct).
mirror::Class* declaring_class = method->GetDeclaringClass();
@@ -2101,15 +2137,14 @@ bool ClassLinker::FindOatMethodFor(mirror::ArtMethod* method, OatFile::OatMethod
GetOatMethodIndexFromMethodIndex(*declaring_class->GetDexCache()->GetDexFile(),
method->GetDeclaringClass()->GetDexClassDefIndex(),
method->GetDexMethodIndex()));
- OatFile::OatClass oat_class;
- if (!FindOatClass(*declaring_class->GetDexCache()->GetDexFile(),
- declaring_class->GetDexClassDefIndex(),
- &oat_class)) {
- return false;
- }
-
- *oat_method = oat_class.GetOatMethod(oat_method_index);
- return true;
+ OatFile::OatClass oat_class = FindOatClass(*declaring_class->GetDexCache()->GetDexFile(),
+ declaring_class->GetDexClassDefIndex(),
+ found);
+ if (!*found) {
+ return OatFile::OatMethod::Invalid();
+ }
+ *found = true;
+ return oat_class.GetOatMethod(oat_method_index);
}
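FindOatClass() and FindOatMethodFor() now return their result by value and report success through a bool out-parameter, so callers no longer need a default-constructible OatClass/OatMethod. The resulting caller pattern, as in GetQuickOatCodeFor() below:

    bool found;
    OatFile::OatMethod oat_method = class_linker->FindOatMethodFor(method, &found);
    const void* code = found ? oat_method.GetQuickCode() : nullptr;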
// Special case to get oat code without overwriting a trampoline.
@@ -2118,9 +2153,10 @@ const void* ClassLinker::GetQuickOatCodeFor(mirror::ArtMethod* method) {
if (method->IsProxyMethod()) {
return GetQuickProxyInvokeHandler();
}
- OatFile::OatMethod oat_method;
+ bool found;
+ OatFile::OatMethod oat_method = FindOatMethodFor(method, &found);
const void* result = nullptr;
- if (FindOatMethodFor(method, &oat_method)) {
+ if (found) {
result = oat_method.GetQuickCode();
}
@@ -2146,10 +2182,11 @@ const void* ClassLinker::GetPortableOatCodeFor(mirror::ArtMethod* method,
if (method->IsProxyMethod()) {
return GetPortableProxyInvokeHandler();
}
- OatFile::OatMethod oat_method;
+ bool found;
+ OatFile::OatMethod oat_method = FindOatMethodFor(method, &found);
const void* result = nullptr;
const void* quick_code = nullptr;
- if (FindOatMethodFor(method, &oat_method)) {
+ if (found) {
result = oat_method.GetPortableCode();
quick_code = oat_method.GetQuickCode();
}
@@ -2168,10 +2205,29 @@ const void* ClassLinker::GetPortableOatCodeFor(mirror::ArtMethod* method,
return result;
}
+const void* ClassLinker::GetOatMethodQuickCodeFor(mirror::ArtMethod* method) {
+ if (method->IsNative() || method->IsAbstract() || method->IsProxyMethod()) {
+ return nullptr;
+ }
+ bool found;
+ OatFile::OatMethod oat_method = FindOatMethodFor(method, &found);
+ return found ? oat_method.GetQuickCode() : nullptr;
+}
+
+const void* ClassLinker::GetOatMethodPortableCodeFor(mirror::ArtMethod* method) {
+ if (method->IsNative() || method->IsAbstract() || method->IsProxyMethod()) {
+ return nullptr;
+ }
+ bool found;
+ OatFile::OatMethod oat_method = FindOatMethodFor(method, &found);
+ return found ? oat_method.GetPortableCode() : nullptr;
+}
+
const void* ClassLinker::GetQuickOatCodeFor(const DexFile& dex_file, uint16_t class_def_idx,
uint32_t method_idx) {
- OatFile::OatClass oat_class;
- if (!FindOatClass(dex_file, class_def_idx, &oat_class)) {
+ bool found;
+ OatFile::OatClass oat_class = FindOatClass(dex_file, class_def_idx, &found);
+ if (!found) {
return nullptr;
}
uint32_t oat_method_idx = GetOatMethodIndexFromMethodIndex(dex_file, class_def_idx, method_idx);
@@ -2180,8 +2236,9 @@ const void* ClassLinker::GetQuickOatCodeFor(const DexFile& dex_file, uint16_t cl
const void* ClassLinker::GetPortableOatCodeFor(const DexFile& dex_file, uint16_t class_def_idx,
uint32_t method_idx) {
- OatFile::OatClass oat_class;
- if (!FindOatClass(dex_file, class_def_idx, &oat_class)) {
+ bool found;
+ OatFile::OatClass oat_class = FindOatClass(dex_file, class_def_idx, &found);
+ if (!found) {
return nullptr;
}
uint32_t oat_method_idx = GetOatMethodIndexFromMethodIndex(dex_file, class_def_idx, method_idx);
@@ -2234,8 +2291,9 @@ void ClassLinker::FixupStaticTrampolines(mirror::Class* klass) {
while (it.HasNextInstanceField()) {
it.Next();
}
- OatFile::OatClass oat_class;
- bool has_oat_class = FindOatClass(dex_file, klass->GetDexClassDefIndex(), &oat_class);
+ bool has_oat_class;
+ OatFile::OatClass oat_class = FindOatClass(dex_file, klass->GetDexClassDefIndex(),
+ &has_oat_class);
// Link the code of methods skipped by LinkCode.
for (size_t method_index = 0; it.HasNextDirectMethod(); ++method_index, it.Next()) {
mirror::ArtMethod* method = klass->GetDirectMethod(method_index);
@@ -2386,12 +2444,16 @@ void ClassLinker::LoadClass(const DexFile& dex_file,
return; // no fields or methods - for example a marker interface
}
- OatFile::OatClass oat_class;
- if (Runtime::Current()->IsStarted()
- && !Runtime::Current()->UseCompileTimeClassPath()
- && FindOatClass(dex_file, klass->GetDexClassDefIndex(), &oat_class)) {
- LoadClassMembers(dex_file, class_data, klass, class_loader, &oat_class);
- } else {
+
+ bool has_oat_class = false;
+ if (Runtime::Current()->IsStarted() && !Runtime::Current()->UseCompileTimeClassPath()) {
+ OatFile::OatClass oat_class = FindOatClass(dex_file, klass->GetDexClassDefIndex(),
+ &has_oat_class);
+ if (has_oat_class) {
+ LoadClassMembers(dex_file, class_data, klass, class_loader, &oat_class);
+ }
+ }
+ if (!has_oat_class) {
LoadClassMembers(dex_file, class_data, klass, class_loader, nullptr);
}
}
@@ -3561,19 +3623,13 @@ mirror::ArtMethod* ClassLinker::CreateProxyConstructor(Thread* self,
proxy_class->GetDirectMethods();
CHECK_EQ(proxy_direct_methods->GetLength(), 16);
mirror::ArtMethod* proxy_constructor = proxy_direct_methods->Get(2);
+ // Clone the existing constructor of Proxy (our constructor would just invoke it, so steal its
+ // code_ too).
mirror::ArtMethod* constructor = down_cast<mirror::ArtMethod*>(proxy_constructor->Clone(self));
if (constructor == nullptr) {
CHECK(self->IsExceptionPending()); // OOME.
return nullptr;
}
- // Make the proxy constructor's code always point to the uninstrumented code. This avoids
- // getting a method enter event for the proxy constructor as the proxy constructor doesn't
- // have an activation.
- bool have_portable_code;
- constructor->SetEntryPointFromQuickCompiledCode(GetQuickOatCodeFor(proxy_constructor));
- constructor->SetEntryPointFromPortableCompiledCode(GetPortableOatCodeFor(proxy_constructor,
- &have_portable_code));
-
// Make this constructor public and fix the class to be our Proxy version
constructor->SetAccessFlags((constructor->GetAccessFlags() & ~kAccProtected) | kAccPublic);
constructor->SetDeclaringClass(klass.Get());
@@ -4330,7 +4386,7 @@ bool ClassLinker::LinkInterfaceMethods(Handle<mirror::Class> klass,
return true;
}
}
- StackHandleScope<4> hs(self);
+ StackHandleScope<5> hs(self);
Handle<mirror::IfTable> iftable(hs.NewHandle(AllocIfTable(self, ifcount)));
if (UNLIKELY(iftable.Get() == NULL)) {
CHECK(self->IsExceptionPending()); // OOME.
@@ -4413,7 +4469,13 @@ bool ClassLinker::LinkInterfaceMethods(Handle<mirror::Class> klass,
}
MethodHelper interface_mh(hs.NewHandle<mirror::ArtMethod>(nullptr));
MethodHelper vtable_mh(hs.NewHandle<mirror::ArtMethod>(nullptr));
- std::vector<mirror::ArtMethod*> miranda_list;
+ size_t max_miranda_methods = 0; // The max size of miranda_list.
+ for (size_t i = 0; i < ifcount; ++i) {
+ max_miranda_methods += iftable->GetInterface(i)->NumVirtualMethods();
+ }
+ Handle<mirror::ObjectArray<mirror::ArtMethod>>
+ miranda_list(hs.NewHandle(AllocArtMethodArray(self, max_miranda_methods)));
+ size_t miranda_list_size = 0; // The current size of miranda_list.
for (size_t i = 0; i < ifcount; ++i) {
size_t num_methods = iftable->GetInterface(i)->NumVirtualMethods();
if (num_methods > 0) {
@@ -4428,8 +4490,7 @@ bool ClassLinker::LinkInterfaceMethods(Handle<mirror::Class> klass,
Handle<mirror::ObjectArray<mirror::ArtMethod>> vtable(
hs.NewHandle(klass->GetVTableDuringLinking()));
for (size_t j = 0; j < num_methods; ++j) {
- mirror::ArtMethod* interface_method = iftable->GetInterface(i)->GetVirtualMethod(j);
- interface_mh.ChangeMethod(interface_method);
+ interface_mh.ChangeMethod(iftable->GetInterface(i)->GetVirtualMethod(j));
int32_t k;
// For each method listed in the interface's method list, find the
// matching method in our class's method list. We want to favor the
@@ -4440,22 +4501,21 @@ bool ClassLinker::LinkInterfaceMethods(Handle<mirror::Class> klass,
// those don't end up in the virtual method table, so it shouldn't
// matter which direction we go. We walk it backward anyway.)
for (k = vtable->GetLength() - 1; k >= 0; --k) {
- mirror::ArtMethod* vtable_method = vtable->Get(k);
- vtable_mh.ChangeMethod(vtable_method);
+ vtable_mh.ChangeMethod(vtable->Get(k));
if (interface_mh.HasSameNameAndSignature(&vtable_mh)) {
- if (!vtable_method->IsAbstract() && !vtable_method->IsPublic()) {
+ if (!vtable_mh.Get()->IsAbstract() && !vtable_mh.Get()->IsPublic()) {
ThrowIllegalAccessError(
klass.Get(),
"Method '%s' implementing interface method '%s' is not public",
- PrettyMethod(vtable_method).c_str(),
- PrettyMethod(interface_method).c_str());
+ PrettyMethod(vtable_mh.Get()).c_str(),
+ PrettyMethod(interface_mh.Get()).c_str());
return false;
}
- method_array->Set<false>(j, vtable_method);
+ method_array->Set<false>(j, vtable_mh.Get());
// Place method in imt if entry is empty, place conflict otherwise.
- uint32_t imt_index = interface_method->GetDexMethodIndex() % mirror::Class::kImtSize;
+ uint32_t imt_index = interface_mh.Get()->GetDexMethodIndex() % mirror::Class::kImtSize;
if (imtable->Get(imt_index) == NULL) {
- imtable->Set<false>(imt_index, vtable_method);
+ imtable->Set<false>(imt_index, vtable_mh.Get());
imtable_changed = true;
} else {
imtable->Set<false>(imt_index, runtime->GetImtConflictMethod());
@@ -4466,7 +4526,9 @@ bool ClassLinker::LinkInterfaceMethods(Handle<mirror::Class> klass,
if (k < 0) {
StackHandleScope<1> hs(self);
auto miranda_method = hs.NewHandle<mirror::ArtMethod>(nullptr);
- for (mirror::ArtMethod* mir_method : miranda_list) {
+ for (size_t l = 0; l < miranda_list_size; ++l) {
+ mirror::ArtMethod* mir_method = miranda_list->Get(l);
+ DCHECK(mir_method != nullptr);
vtable_mh.ChangeMethod(mir_method);
if (interface_mh.HasSameNameAndSignature(&vtable_mh)) {
miranda_method.Assign(mir_method);
@@ -4475,13 +4537,13 @@ bool ClassLinker::LinkInterfaceMethods(Handle<mirror::Class> klass,
}
if (miranda_method.Get() == NULL) {
// Point the interface table at a phantom slot.
- miranda_method.Assign(down_cast<mirror::ArtMethod*>(interface_method->Clone(self)));
+ miranda_method.Assign(down_cast<mirror::ArtMethod*>(interface_mh.Get()->Clone(self)));
if (UNLIKELY(miranda_method.Get() == NULL)) {
CHECK(self->IsExceptionPending()); // OOME.
return false;
}
- // TODO: If a methods move then the miranda_list may hold stale references.
- miranda_list.push_back(miranda_method.Get());
+ DCHECK_LT(miranda_list_size, max_miranda_methods);
+ miranda_list->Set<false>(miranda_list_size++, miranda_method.Get());
}
method_array->Set<false>(j, miranda_method.Get());
}
@@ -4498,9 +4560,9 @@ bool ClassLinker::LinkInterfaceMethods(Handle<mirror::Class> klass,
}
klass->SetImTable(imtable.Get());
}
- if (!miranda_list.empty()) {
+ if (miranda_list_size > 0) {
int old_method_count = klass->NumVirtualMethods();
- int new_method_count = old_method_count + miranda_list.size();
+ int new_method_count = old_method_count + miranda_list_size;
mirror::ObjectArray<mirror::ArtMethod>* virtuals;
if (old_method_count == 0) {
virtuals = AllocArtMethodArray(self, new_method_count);
@@ -4518,14 +4580,14 @@ bool ClassLinker::LinkInterfaceMethods(Handle<mirror::Class> klass,
hs.NewHandle(klass->GetVTableDuringLinking()));
CHECK(vtable.Get() != NULL);
int old_vtable_count = vtable->GetLength();
- int new_vtable_count = old_vtable_count + miranda_list.size();
+ int new_vtable_count = old_vtable_count + miranda_list_size;
vtable.Assign(vtable->CopyOf(self, new_vtable_count));
if (UNLIKELY(vtable.Get() == NULL)) {
CHECK(self->IsExceptionPending()); // OOME.
return false;
}
- for (size_t i = 0; i < miranda_list.size(); ++i) {
- mirror::ArtMethod* method = miranda_list[i];
+ for (size_t i = 0; i < miranda_list_size; ++i) {
+ mirror::ArtMethod* method = miranda_list->Get(i);
// Leave the declaring class alone as type indices are relative to it
method->SetAccessFlags(method->GetAccessFlags() | kAccMiranda);
method->SetMethodIndex(0xFFFF & (old_vtable_count + i));
@@ -4853,7 +4915,7 @@ mirror::ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file, uint32_t
Handle<mirror::ClassLoader> class_loader,
Handle<mirror::ArtMethod> referrer,
InvokeType type) {
- DCHECK(dex_cache.Get() != NULL);
+ DCHECK(dex_cache.Get() != nullptr);
// Check for hit in the dex cache.
mirror::ArtMethod* resolved = dex_cache->GetResolvedMethod(method_idx);
if (resolved != nullptr && !resolved->IsRuntimeMethod()) {
@@ -4862,9 +4924,9 @@ mirror::ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file, uint32_t
// Fail, get the declaring class.
const DexFile::MethodId& method_id = dex_file.GetMethodId(method_idx);
mirror::Class* klass = ResolveType(dex_file, method_id.class_idx_, dex_cache, class_loader);
- if (klass == NULL) {
+ if (klass == nullptr) {
DCHECK(Thread::Current()->IsExceptionPending());
- return NULL;
+ return nullptr;
}
// Scan using method_idx, this saves string compares but will only hit for matching dex
// caches/files.
@@ -4875,7 +4937,7 @@ mirror::ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file, uint32_t
break;
case kInterface:
resolved = klass->FindInterfaceMethod(dex_cache.Get(), method_idx);
- DCHECK(resolved == NULL || resolved->GetDeclaringClass()->IsInterface());
+ DCHECK(resolved == nullptr || resolved->GetDeclaringClass()->IsInterface());
break;
case kSuper: // Fall-through.
case kVirtual:
@@ -4884,7 +4946,7 @@ mirror::ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file, uint32_t
default:
LOG(FATAL) << "Unreachable - invocation type: " << type;
}
- if (resolved == NULL) {
+ if (resolved == nullptr) {
// Search by name, which works across dex files.
const char* name = dex_file.StringDataByIdx(method_id.name_idx_);
const Signature signature = dex_file.GetMethodSignature(method_id);
@@ -4895,7 +4957,7 @@ mirror::ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file, uint32_t
break;
case kInterface:
resolved = klass->FindInterfaceMethod(name, signature);
- DCHECK(resolved == NULL || resolved->GetDeclaringClass()->IsInterface());
+ DCHECK(resolved == nullptr || resolved->GetDeclaringClass()->IsInterface());
break;
case kSuper: // Fall-through.
case kVirtual:
@@ -4903,94 +4965,97 @@ mirror::ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file, uint32_t
break;
}
}
- if (resolved != NULL) {
- // We found a method, check for incompatible class changes.
- if (resolved->CheckIncompatibleClassChange(type)) {
- resolved = NULL;
- }
- }
- if (resolved != NULL) {
+ // If we found a method, check for incompatible class changes.
+ if (LIKELY(resolved != nullptr && !resolved->CheckIncompatibleClassChange(type))) {
// Be a good citizen and update the dex cache to speed subsequent calls.
dex_cache->SetResolvedMethod(method_idx, resolved);
return resolved;
} else {
- // We failed to find the method which means either an access error, an incompatible class
- // change, or no such method. First try to find the method among direct and virtual methods.
- const char* name = dex_file.StringDataByIdx(method_id.name_idx_);
- const Signature signature = dex_file.GetMethodSignature(method_id);
- switch (type) {
- case kDirect:
- case kStatic:
- resolved = klass->FindVirtualMethod(name, signature);
- break;
- case kInterface:
- case kVirtual:
- case kSuper:
- resolved = klass->FindDirectMethod(name, signature);
- break;
- }
+ // If we had a method, it's an incompatible-class-change error.
+ if (resolved != nullptr) {
+ ThrowIncompatibleClassChangeError(type, resolved->GetInvokeType(), resolved, referrer.Get());
+ } else {
+ // We failed to find the method which means either an access error, an incompatible class
+ // change, or no such method. First try to find the method among direct and virtual methods.
+ const char* name = dex_file.StringDataByIdx(method_id.name_idx_);
+ const Signature signature = dex_file.GetMethodSignature(method_id);
+ switch (type) {
+ case kDirect:
+ case kStatic:
+ resolved = klass->FindVirtualMethod(name, signature);
+          // Note: a kDirect/kStatic mismatch (a static method invoked as direct, or vice
+          // versa) would already have resolved via FindDirectMethod and been caught by the
+          // incompatible-class-change branch above, so probing the virtual methods suffices.
+ break;
+ case kInterface:
+ case kVirtual:
+ case kSuper:
+ resolved = klass->FindDirectMethod(name, signature);
+ break;
+ }
- // If we found something, check that it can be accessed by the referrer.
- if (resolved != NULL && referrer.Get() != NULL) {
- mirror::Class* methods_class = resolved->GetDeclaringClass();
- mirror::Class* referring_class = referrer->GetDeclaringClass();
- if (!referring_class->CanAccess(methods_class)) {
- ThrowIllegalAccessErrorClassForMethodDispatch(referring_class, methods_class,
- resolved, type);
- return NULL;
- } else if (!referring_class->CanAccessMember(methods_class,
- resolved->GetAccessFlags())) {
- ThrowIllegalAccessErrorMethod(referring_class, resolved);
- return NULL;
+ // If we found something, check that it can be accessed by the referrer.
+ if (resolved != nullptr && referrer.Get() != nullptr) {
+ mirror::Class* methods_class = resolved->GetDeclaringClass();
+ mirror::Class* referring_class = referrer->GetDeclaringClass();
+ if (!referring_class->CanAccess(methods_class)) {
+ ThrowIllegalAccessErrorClassForMethodDispatch(referring_class, methods_class,
+ resolved, type);
+ return nullptr;
+ } else if (!referring_class->CanAccessMember(methods_class,
+ resolved->GetAccessFlags())) {
+ ThrowIllegalAccessErrorMethod(referring_class, resolved);
+ return nullptr;
+ }
}
- }
- // Otherwise, throw an IncompatibleClassChangeError if we found something, and check interface
- // methods and throw if we find the method there. If we find nothing, throw a NoSuchMethodError.
- switch (type) {
- case kDirect:
- case kStatic:
- if (resolved != NULL) {
- ThrowIncompatibleClassChangeError(type, kVirtual, resolved, referrer.Get());
- } else {
- resolved = klass->FindInterfaceMethod(name, signature);
- if (resolved != NULL) {
- ThrowIncompatibleClassChangeError(type, kInterface, resolved, referrer.Get());
+ // Otherwise, throw an IncompatibleClassChangeError if we found something, and check interface
+ // methods and throw if we find the method there. If we find nothing, throw a
+ // NoSuchMethodError.
+ switch (type) {
+ case kDirect:
+ case kStatic:
+ if (resolved != nullptr) {
+ ThrowIncompatibleClassChangeError(type, kVirtual, resolved, referrer.Get());
} else {
- ThrowNoSuchMethodError(type, klass, name, signature);
+ resolved = klass->FindInterfaceMethod(name, signature);
+ if (resolved != nullptr) {
+ ThrowIncompatibleClassChangeError(type, kInterface, resolved, referrer.Get());
+ } else {
+ ThrowNoSuchMethodError(type, klass, name, signature);
+ }
}
- }
- break;
- case kInterface:
- if (resolved != NULL) {
- ThrowIncompatibleClassChangeError(type, kDirect, resolved, referrer.Get());
- } else {
- resolved = klass->FindVirtualMethod(name, signature);
- if (resolved != NULL) {
- ThrowIncompatibleClassChangeError(type, kVirtual, resolved, referrer.Get());
+ break;
+ case kInterface:
+ if (resolved != nullptr) {
+ ThrowIncompatibleClassChangeError(type, kDirect, resolved, referrer.Get());
} else {
- ThrowNoSuchMethodError(type, klass, name, signature);
+ resolved = klass->FindVirtualMethod(name, signature);
+ if (resolved != nullptr) {
+ ThrowIncompatibleClassChangeError(type, kVirtual, resolved, referrer.Get());
+ } else {
+ ThrowNoSuchMethodError(type, klass, name, signature);
+ }
}
- }
- break;
- case kSuper:
- ThrowNoSuchMethodError(type, klass, name, signature);
- break;
- case kVirtual:
- if (resolved != NULL) {
- ThrowIncompatibleClassChangeError(type, kDirect, resolved, referrer.Get());
- } else {
- resolved = klass->FindInterfaceMethod(name, signature);
- if (resolved != NULL) {
- ThrowIncompatibleClassChangeError(type, kInterface, resolved, referrer.Get());
+ break;
+ case kSuper:
+ ThrowNoSuchMethodError(type, klass, name, signature);
+ break;
+ case kVirtual:
+ if (resolved != nullptr) {
+ ThrowIncompatibleClassChangeError(type, kDirect, resolved, referrer.Get());
} else {
- ThrowNoSuchMethodError(type, klass, name, signature);
+ resolved = klass->FindInterfaceMethod(name, signature);
+ if (resolved != nullptr) {
+ ThrowIncompatibleClassChangeError(type, kInterface, resolved, referrer.Get());
+ } else {
+ ThrowNoSuchMethodError(type, klass, name, signature);
+ }
}
- }
- break;
+ break;
+ }
}
DCHECK(Thread::Current()->IsExceptionPending());
- return NULL;
+ return nullptr;
}
}
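
For context, the restructured ResolveMethod keeps a two-outcome contract: either a method is returned (and written back to the dex cache), or nullptr is returned with an IncompatibleClassChangeError, IllegalAccessError, or NoSuchMethodError pending. A minimal caller sketch relying on that contract (hypothetical call site, not part of this change):

    // Hypothetical caller: whenever nullptr comes back, an exception is pending.
    mirror::ArtMethod* m = class_linker->ResolveMethod(dex_file, method_idx, dex_cache,
                                                       class_loader, referrer, kVirtual);
    if (m == nullptr) {
      DCHECK(Thread::Current()->IsExceptionPending());
      return false;  // Unwind with the pending exception.
    }
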
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 6fc0f0e2f2..5694149373 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -344,6 +344,14 @@ class ClassLinker {
const void* GetPortableOatCodeFor(const DexFile& dex_file, uint16_t class_def_idx, uint32_t method_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Get compiled code for a method; return null if no code exists. This is unlike
+ // Get..OatCodeFor, which will return a bridge or interpreter entrypoint.
+ const void* GetOatMethodQuickCodeFor(mirror::ArtMethod* method)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const void* GetOatMethodPortableCodeFor(mirror::ArtMethod* method)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
pid_t GetClassesLockOwner(); // For SignalCatcher.
pid_t GetDexLockOwner(); // For SignalCatcher.
@@ -392,7 +400,7 @@ class ClassLinker {
}
private:
- bool FindOatMethodFor(mirror::ArtMethod* method, OatFile::OatMethod* oat_method)
+ const OatFile::OatMethod FindOatMethodFor(mirror::ArtMethod* method, bool* found)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
OatFile& GetImageOatFile(gc::space::ImageSpace* space)
@@ -461,9 +469,9 @@ class ClassLinker {
void FixupStaticTrampolines(mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Finds the associated oat class for a dex_file and descriptor. Returns whether the class
- // was found, and sets the data in oat_class.
- bool FindOatClass(const DexFile& dex_file, uint16_t class_def_idx, OatFile::OatClass* oat_class)
+ // Finds the associated oat class for a dex_file and class def index. Returns an invalid
+ // OatClass on error and sets found to false.
+ OatFile::OatClass FindOatClass(const DexFile& dex_file, uint16_t class_def_idx, bool* found)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void RegisterDexFileLocked(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache)
@@ -565,6 +573,10 @@ class ClassLinker {
std::vector<std::string>* error_msg)
LOCKS_EXCLUDED(dex_lock_, Locks::mutator_lock_);
+ const OatFile* GetInterpretedOnlyOat(const std::string& oat_path,
+ InstructionSet isa,
+ std::string* error_msg);
+
const OatFile* PatchAndRetrieveOat(const std::string& input, const std::string& output,
const std::string& image_location, InstructionSet isa,
std::string* error_msg)
@@ -744,6 +756,7 @@ class ClassLinker {
friend class ImageDumper; // for FindOpenedOatFileFromOatLocation
friend class ElfPatcher; // for FindOpenedOatFileForDexFile & FindOpenedOatFileFromOatLocation
friend class NoDex2OatTest; // for FindOpenedOatFileForDexFile
+ friend class NoPatchoatTest; // for FindOpenedOatFileForDexFile
FRIEND_TEST(ClassLinkerTest, ClassRootDescriptors);
FRIEND_TEST(mirror::DexCacheTest, Open);
FRIEND_TEST(ExceptionTest, FindExceptionHandler);
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index ab4a2bbdf7..eed6f7184c 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -285,19 +285,6 @@ std::string CommonRuntimeTest::GetDexFileName(const std::string& jar_prefix) {
return StringPrintf("%s/framework/%s.jar", GetAndroidRoot(), jar_prefix.c_str());
}
-std::string CommonRuntimeTest::GetLibCoreOatFileName() {
- return GetOatFileName("core");
-}
-
-std::string CommonRuntimeTest::GetOatFileName(const std::string& oat_prefix) {
- if (IsHost()) {
- const char* host_dir = getenv("ANDROID_HOST_OUT");
- CHECK(host_dir != nullptr);
- return StringPrintf("%s/framework/%s.art", host_dir, oat_prefix.c_str());
- }
- return StringPrintf("%s/framework/%s.art", GetAndroidRoot(), oat_prefix.c_str());
-}
-
std::string CommonRuntimeTest::GetTestAndroidRoot() {
if (IsHost()) {
const char* host_dir = getenv("ANDROID_HOST_OUT");
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index 12c1241270..1ca6eb3a15 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -97,12 +97,6 @@ class CommonRuntimeTest : public testing::Test {
// Gets the path of the specified dex file for host or target.
std::string GetDexFileName(const std::string& jar_prefix);
- // Gets the path of the libcore oat file.
- std::string GetLibCoreOatFileName();
-
- // Gets the path of the specified oat file for host or target.
- std::string GetOatFileName(const std::string& oat_prefix);
-
std::string GetTestAndroidRoot();
std::vector<const DexFile*> OpenTestDexFiles(const char* name)
@@ -161,6 +155,12 @@ class CheckJniAbortCatcher {
return; \
}
+#define TEST_DISABLED_FOR_MIPS() \
+ if (kRuntimeISA == kMips || kRuntimeISA == kMips64) { \
+ printf("WARNING: TEST DISABLED FOR MIPS\n"); \
+ return; \
+ }
+
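
A test opts out by invoking the new macro as its first statement; a hypothetical usage sketch (fixture and test names assumed):

    TEST_F(StubTest, SomeMipsSensitiveStub) {
      TEST_DISABLED_FOR_MIPS();  // Prints a warning and returns early on MIPS.
      // ... assertions that only run on non-MIPS ISAs ...
    }
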
} // namespace art
namespace std {
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 1cddb8bc9c..6d2f21e1e5 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -664,6 +664,11 @@ void Dbg::StartJdwp() {
}
void Dbg::StopJdwp() {
+ // Post VM_DEATH event before the JDWP connection is closed (either by the JDWP thread or the
+ // destruction of gJdwpState).
+ if (gJdwpState != nullptr && gJdwpState->IsActive()) {
+ gJdwpState->PostVMDeath();
+ }
// Prevent the JDWP thread from processing JDWP incoming packets after we close the connection.
Disposed();
delete gJdwpState;
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index 6179b5e8d1..566ce037ff 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -1020,7 +1020,7 @@ Elf32_Shdr* ElfFile::FindSectionByName(const std::string& name) const {
return nullptr;
}
-struct PACKED(1) FDE {
+struct PACKED(1) FDE32 {
uint32_t raw_length_;
uint32_t GetLength() {
return raw_length_ + sizeof(raw_length_);
@@ -1031,25 +1031,186 @@ struct PACKED(1) FDE {
uint8_t instructions[0];
};
-static FDE* NextFDE(FDE* frame) {
+static FDE32* NextFDE(FDE32* frame) {
byte* fde_bytes = reinterpret_cast<byte*>(frame);
fde_bytes += frame->GetLength();
- return reinterpret_cast<FDE*>(fde_bytes);
+ return reinterpret_cast<FDE32*>(fde_bytes);
}
-static bool IsFDE(FDE* frame) {
+static bool IsFDE(FDE32* frame) {
return frame->CIE_pointer != 0;
}
-// TODO This only works for 32-bit Elf Files.
-static bool FixupEHFrame(uintptr_t text_start, byte* eh_frame, size_t eh_frame_size) {
- FDE* last_frame = reinterpret_cast<FDE*>(eh_frame + eh_frame_size);
- FDE* frame = NextFDE(reinterpret_cast<FDE*>(eh_frame));
- for (; frame < last_frame; frame = NextFDE(frame)) {
- if (!IsFDE(frame)) {
+struct PACKED(1) FDE64 {
+ uint32_t raw_length_;
+ uint64_t extended_length_;
+ uint64_t GetLength() {
+ return extended_length_ + sizeof(raw_length_) + sizeof(extended_length_);
+ }
+ uint64_t CIE_pointer;
+ uint64_t initial_location;
+ uint64_t address_range;
+ uint8_t instructions[0];
+};
+
+static FDE64* NextFDE(FDE64* frame) {
+ byte* fde_bytes = reinterpret_cast<byte*>(frame);
+ fde_bytes += frame->GetLength();
+ return reinterpret_cast<FDE64*>(fde_bytes);
+}
+
+static bool IsFDE(FDE64* frame) {
+ return frame->CIE_pointer != 0;
+}
+
+static bool FixupEHFrame(off_t base_address_delta,
+ byte* eh_frame, size_t eh_frame_size) {
+ if (*(reinterpret_cast<uint32_t*>(eh_frame)) == 0xffffffff) {
+ FDE64* last_frame = reinterpret_cast<FDE64*>(eh_frame + eh_frame_size);
+ FDE64* frame = NextFDE(reinterpret_cast<FDE64*>(eh_frame));
+ for (; frame < last_frame; frame = NextFDE(frame)) {
+ if (!IsFDE(frame)) {
+ return false;
+ }
+ frame->initial_location += base_address_delta;
+ }
+ return true;
+ } else {
+ FDE32* last_frame = reinterpret_cast<FDE32*>(eh_frame + eh_frame_size);
+ FDE32* frame = NextFDE(reinterpret_cast<FDE32*>(eh_frame));
+ for (; frame < last_frame; frame = NextFDE(frame)) {
+ if (!IsFDE(frame)) {
+ return false;
+ }
+ frame->initial_location += base_address_delta;
+ }
+ return true;
+ }
+}
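
The 32/64-bit split above keys off the DWARF initial-length encoding: a leading 0xffffffff marks the 64-bit format, whose real length follows in the next 8 bytes; anything else is a 32-bit length. A standalone reader for that field could look like this (illustrative sketch, not a helper from this change; assumes little-endian input like the code above):

    #include <cstdint>
    #include <cstring>

    // Read a DWARF "initial length"; sets *is_dwarf64 and returns the length.
    static uint64_t ReadInitialLength(const uint8_t* p, bool* is_dwarf64) {
      uint32_t len32;
      memcpy(&len32, p, sizeof(len32));
      if (len32 == 0xffffffff) {  // Escape value: 64-bit DWARF follows.
        uint64_t len64;
        memcpy(&len64, p + sizeof(len32), sizeof(len64));
        *is_dwarf64 = true;
        return len64;
      }
      *is_dwarf64 = false;
      return len32;
    }
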
+
+static uint8_t* NextLeb128(uint8_t* current) {
+ DecodeUnsignedLeb128(const_cast<const uint8_t**>(&current));
+ return current;
+}
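
NextLeb128 leans on ART's DecodeUnsignedLeb128; the underlying ULEB128 scheme is the usual 7-bits-per-byte encoding with a continuation bit. For reference, the textbook decoder (a generic sketch, not ART's exact implementation):

    #include <cstdint>

    // Textbook ULEB128 decode: the low 7 bits carry data, the high bit says
    // "more bytes follow". Advances *data past the encoded value.
    static uint32_t DecodeUleb128Sketch(const uint8_t** data) {
      uint32_t result = 0;
      int shift = 0;
      uint8_t byte;
      do {
        byte = *(*data)++;
        result |= static_cast<uint32_t>(byte & 0x7f) << shift;
        shift += 7;
      } while ((byte & 0x80) != 0);
      return result;
    }
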
+
+struct PACKED(1) DebugLineHeader {
+ uint32_t unit_length_; // TODO 32-bit specific size
+ uint16_t version_;
+ uint32_t header_length_; // TODO 32-bit specific size
+ uint8_t minimum_instruction_length_;
+ uint8_t maximum_operations_per_instruction_;
+ uint8_t default_is_stmt_;
+ int8_t line_base_;
+ uint8_t line_range_;
+ uint8_t opcode_base_;
+ uint8_t remaining_[0];
+
+ bool IsStandardOpcode(const uint8_t* op) const {
+ return *op != 0 && *op < opcode_base_;
+ }
+
+ bool IsExtendedOpcode(const uint8_t* op) const {
+ return *op == 0;
+ }
+
+ const uint8_t* GetStandardOpcodeLengths() const {
+ return remaining_;
+ }
+
+ uint8_t* GetNextOpcode(uint8_t* op) const {
+ if (IsExtendedOpcode(op)) {
+ uint8_t* length_field = op + 1;
+ uint32_t length = DecodeUnsignedLeb128(const_cast<const uint8_t**>(&length_field));
+ return length_field + length;
+ } else if (!IsStandardOpcode(op)) {
+ return op + 1;
+ } else if (*op == DW_LNS_fixed_advance_pc) {
+ return op + 1 + sizeof(uint16_t);
+ } else {
+ uint8_t num_args = GetStandardOpcodeLengths()[*op - 1];
+ op += 1;
+ for (int i = 0; i < num_args; i++) {
+ op = NextLeb128(op);
+ }
+ return op;
+ }
+ }
+
+ uint8_t* GetDebugLineData() const {
+ const uint8_t* hdr_start =
+ reinterpret_cast<const uint8_t*>(&header_length_) + sizeof(header_length_);
+ return const_cast<uint8_t*>(hdr_start + header_length_);
+ }
+};
+
+class DebugLineInstructionIterator {
+ public:
+ static DebugLineInstructionIterator* Create(DebugLineHeader* header, size_t section_size) {
+ std::unique_ptr<DebugLineInstructionIterator> line_iter(
+ new DebugLineInstructionIterator(header, section_size));
+ if (line_iter.get() == nullptr) {
+ return nullptr;
+ } else {
+ return line_iter.release();
+ }
+ }
+
+ ~DebugLineInstructionIterator() {}
+
+ bool Next() {
+ if (current_instruction_ == nullptr) {
return false;
}
- frame->initial_location += text_start;
+ current_instruction_ = header_->GetNextOpcode(current_instruction_);
+ if (current_instruction_ >= last_instruction_) {
+ current_instruction_ = nullptr;
+ return false;
+ } else {
+ return true;
+ }
+ }
+
+ uint8_t* GetInstruction() {
+ return current_instruction_;
+ }
+
+ bool IsExtendedOpcode() {
+ return header_->IsExtendedOpcode(current_instruction_);
+ }
+
+ uint8_t GetOpcode() {
+ if (!IsExtendedOpcode()) {
+ return *current_instruction_;
+ } else {
+ uint8_t* len_ptr = current_instruction_ + 1;
+ return *NextLeb128(len_ptr);
+ }
+ }
+
+ uint8_t* GetArguments() {
+ if (!IsExtendedOpcode()) {
+ return current_instruction_ + 1;
+ } else {
+ uint8_t* len_ptr = current_instruction_ + 1;
+ return NextLeb128(len_ptr) + 1;
+ }
+ }
+
+ private:
+ DebugLineInstructionIterator(DebugLineHeader* header, size_t size)
+ : header_(header), last_instruction_(reinterpret_cast<uint8_t*>(header) + size),
+ current_instruction_(header->GetDebugLineData()) {}
+
+ DebugLineHeader* header_;
+ uint8_t* last_instruction_;
+ uint8_t* current_instruction_;
+};
+
+static bool FixupDebugLine(off_t base_offset_delta, DebugLineInstructionIterator* iter) {
+ while (iter->Next()) {
+ if (iter->IsExtendedOpcode() && iter->GetOpcode() == DW_LNE_set_address) {
+ *reinterpret_cast<uint32_t*>(iter->GetArguments()) += base_offset_delta;
+ }
}
return true;
}
@@ -1189,18 +1350,27 @@ class DebugAbbrev {
public:
~DebugAbbrev() {}
static DebugAbbrev* Create(const byte* dbg_abbrev, size_t dbg_abbrev_size) {
- std::unique_ptr<DebugAbbrev> abbrev(new DebugAbbrev);
- const byte* last = dbg_abbrev + dbg_abbrev_size;
- while (dbg_abbrev < last) {
+ std::unique_ptr<DebugAbbrev> abbrev(new DebugAbbrev(dbg_abbrev, dbg_abbrev + dbg_abbrev_size));
+ if (!abbrev->ReadAtOffset(0)) {
+ return nullptr;
+ }
+ return abbrev.release();
+ }
+
+ bool ReadAtOffset(uint32_t abbrev_offset) {
+ tags_.clear();
+ tag_list_.clear();
+ const byte* dbg_abbrev = begin_ + abbrev_offset;
+ while (dbg_abbrev < end_ && *dbg_abbrev != 0) {
std::unique_ptr<DebugTag> tag(DebugTag::Create(&dbg_abbrev));
if (tag.get() == nullptr) {
- return nullptr;
+ return false;
} else {
- abbrev->tags_.insert(std::pair<uint32_t, uint32_t>(tag->index_, abbrev->tag_list_.size()));
- abbrev->tag_list_.push_back(std::move(tag));
+ tags_.insert(std::pair<uint32_t, uint32_t>(tag->index_, tag_list_.size()));
+ tag_list_.push_back(std::move(tag));
}
}
- return abbrev.release();
+ return true;
}
DebugTag* ReadTag(const byte* entry) {
@@ -1215,7 +1385,9 @@ class DebugAbbrev {
}
private:
- DebugAbbrev() {}
+ DebugAbbrev(const byte* begin, const byte* end) : begin_(begin), end_(end) {}
+ const byte* begin_;
+ const byte* end_;
std::map<uint32_t, uint32_t> tags_;
std::vector<std::unique_ptr<DebugTag>> tag_list_;
};
@@ -1239,11 +1411,21 @@ class DebugInfoIterator {
if (current_entry_ == nullptr || current_tag_ == nullptr) {
return false;
}
+ bool reread_abbrev = false;
current_entry_ += current_tag_->GetSize();
+ if (reinterpret_cast<DebugInfoHeader*>(current_entry_) >= next_cu_) {
+ current_cu_ = next_cu_;
+ next_cu_ = GetNextCu(current_cu_);
+ current_entry_ = reinterpret_cast<byte*>(current_cu_) + sizeof(DebugInfoHeader);
+ reread_abbrev = true;
+ }
if (current_entry_ >= last_entry_) {
current_entry_ = nullptr;
return false;
}
+ if (reread_abbrev) {
+ abbrev_->ReadAtOffset(current_cu_->debug_abbrev_offset);
+ }
current_tag_ = abbrev_->ReadTag(current_entry_);
if (current_tag_ == nullptr) {
current_entry_ = nullptr;
@@ -1271,49 +1453,91 @@ class DebugInfoIterator {
}
private:
+ static DebugInfoHeader* GetNextCu(DebugInfoHeader* hdr) {
+ byte* hdr_byte = reinterpret_cast<byte*>(hdr);
+ return reinterpret_cast<DebugInfoHeader*>(hdr_byte + sizeof(uint32_t) + hdr->unit_length);
+ }
+
DebugInfoIterator(DebugInfoHeader* header, size_t frame_size, DebugAbbrev* abbrev)
: abbrev_(abbrev),
+ current_cu_(header),
+ next_cu_(GetNextCu(header)),
last_entry_(reinterpret_cast<byte*>(header) + frame_size),
current_entry_(reinterpret_cast<byte*>(header) + sizeof(DebugInfoHeader)),
current_tag_(abbrev_->ReadTag(current_entry_)) {}
DebugAbbrev* abbrev_;
+ DebugInfoHeader* current_cu_;
+ DebugInfoHeader* next_cu_;
byte* last_entry_;
byte* current_entry_;
DebugTag* current_tag_;
};
-static bool FixupDebugInfo(uint32_t text_start, DebugInfoIterator* iter) {
+static bool FixupDebugInfo(off_t base_address_delta, DebugInfoIterator* iter) {
do {
if (iter->GetCurrentTag()->GetAttrSize(DW_AT_low_pc) != sizeof(int32_t) ||
iter->GetCurrentTag()->GetAttrSize(DW_AT_high_pc) != sizeof(int32_t)) {
+ LOG(ERROR) << "DWARF information with 64 bit pointers is not supported yet.";
return false;
}
uint32_t* PC_low = reinterpret_cast<uint32_t*>(iter->GetPointerToField(DW_AT_low_pc));
uint32_t* PC_high = reinterpret_cast<uint32_t*>(iter->GetPointerToField(DW_AT_high_pc));
if (PC_low != nullptr && PC_high != nullptr) {
- *PC_low += text_start;
- *PC_high += text_start;
+ *PC_low += base_address_delta;
+ *PC_high += base_address_delta;
}
} while (iter->next());
return true;
}
-static bool FixupDebugSections(const byte* dbg_abbrev, size_t dbg_abbrev_size,
- uintptr_t text_start,
- byte* dbg_info, size_t dbg_info_size,
- byte* eh_frame, size_t eh_frame_size) {
- std::unique_ptr<DebugAbbrev> abbrev(DebugAbbrev::Create(dbg_abbrev, dbg_abbrev_size));
+bool ElfFile::FixupDebugSections(off_t base_address_delta) {
+ const Elf32_Shdr* debug_info = FindSectionByName(".debug_info");
+ const Elf32_Shdr* debug_abbrev = FindSectionByName(".debug_abbrev");
+ const Elf32_Shdr* eh_frame = FindSectionByName(".eh_frame");
+ const Elf32_Shdr* debug_str = FindSectionByName(".debug_str");
+ const Elf32_Shdr* debug_line = FindSectionByName(".debug_line");
+ const Elf32_Shdr* strtab_sec = FindSectionByName(".strtab");
+ const Elf32_Shdr* symtab_sec = FindSectionByName(".symtab");
+
+ if (debug_info == nullptr || debug_abbrev == nullptr ||
+ debug_str == nullptr || strtab_sec == nullptr || symtab_sec == nullptr) {
+ // Release version of ART does not generate debug info.
+ return true;
+ }
+ if (base_address_delta == 0) {
+ return true;
+ }
+ if (eh_frame != nullptr &&
+ !FixupEHFrame(base_address_delta, Begin() + eh_frame->sh_offset, eh_frame->sh_size)) {
+ return false;
+ }
+
+ std::unique_ptr<DebugAbbrev> abbrev(DebugAbbrev::Create(Begin() + debug_abbrev->sh_offset,
+ debug_abbrev->sh_size));
if (abbrev.get() == nullptr) {
return false;
}
- std::unique_ptr<DebugInfoIterator> iter(
- DebugInfoIterator::Create(reinterpret_cast<DebugInfoHeader*>(dbg_info),
- dbg_info_size, abbrev.get()));
- if (iter.get() == nullptr) {
+ DebugInfoHeader* info_header =
+ reinterpret_cast<DebugInfoHeader*>(Begin() + debug_info->sh_offset);
+ std::unique_ptr<DebugInfoIterator> info_iter(DebugInfoIterator::Create(info_header,
+ debug_info->sh_size,
+ abbrev.get()));
+ if (info_iter.get() == nullptr) {
return false;
}
- return FixupDebugInfo(text_start, iter.get())
- && FixupEHFrame(text_start, eh_frame, eh_frame_size);
+ if (debug_line != nullptr) {
+ DebugLineHeader* line_header =
+ reinterpret_cast<DebugLineHeader*>(Begin() + debug_line->sh_offset);
+ std::unique_ptr<DebugLineInstructionIterator> line_iter(
+ DebugLineInstructionIterator::Create(line_header, debug_line->sh_size));
+ if (line_iter.get() == nullptr) {
+ return false;
+ }
+ if (!FixupDebugLine(base_address_delta, line_iter.get())) {
+ return false;
+ }
+ }
+ return FixupDebugInfo(base_address_delta, info_iter.get());
}
void ElfFile::GdbJITSupport() {
@@ -1331,19 +1555,13 @@ void ElfFile::GdbJITSupport() {
}
ElfFile& all = *all_ptr;
- // Do we have interesting sections?
- const Elf32_Shdr* debug_info = all.FindSectionByName(".debug_info");
- const Elf32_Shdr* debug_abbrev = all.FindSectionByName(".debug_abbrev");
+ // We need the eh_frame for gdb, but debug info might be present without it.
const Elf32_Shdr* eh_frame = all.FindSectionByName(".eh_frame");
- const Elf32_Shdr* debug_str = all.FindSectionByName(".debug_str");
- const Elf32_Shdr* strtab_sec = all.FindSectionByName(".strtab");
- const Elf32_Shdr* symtab_sec = all.FindSectionByName(".symtab");
- Elf32_Shdr* text_sec = all.FindSectionByName(".text");
- if (debug_info == nullptr || debug_abbrev == nullptr || eh_frame == nullptr ||
- debug_str == nullptr || text_sec == nullptr || strtab_sec == nullptr ||
- symtab_sec == nullptr) {
+ if (eh_frame == nullptr) {
return;
}
+
+ // Do we have interesting sections?
// We need to add in a strtab and symtab to the image.
// all is MAP_PRIVATE so it can be written to freely.
// We also already have strtab and symtab so we are fine there.
@@ -1354,13 +1572,9 @@ void ElfFile::GdbJITSupport() {
elf_hdr.e_phentsize = 0;
elf_hdr.e_type = ET_EXEC;
- text_sec->sh_type = SHT_NOBITS;
- text_sec->sh_offset = 0;
-
- if (!FixupDebugSections(
- all.Begin() + debug_abbrev->sh_offset, debug_abbrev->sh_size, text_sec->sh_addr,
- all.Begin() + debug_info->sh_offset, debug_info->sh_size,
- all.Begin() + eh_frame->sh_offset, eh_frame->sh_size)) {
+ // base_address_ is 0 if we are loaded at the known compile-time address (i.e. this is
+ // boot.oat); for regular files it is where the addresses actually start, so in both
+ // cases it is the right delta to apply.
+ if (!all.FixupDebugSections(reinterpret_cast<intptr_t>(base_address_))) {
LOG(ERROR) << "Failed to load GDB data";
return;
}
diff --git a/runtime/elf_file.h b/runtime/elf_file.h
index a966bd9632..1922911e90 100644
--- a/runtime/elf_file.h
+++ b/runtime/elf_file.h
@@ -128,6 +128,8 @@ class ElfFile {
// executable is true at run time, false at compile time.
bool Load(bool executable, std::string* error_msg);
+ bool FixupDebugSections(off_t base_address_delta);
+
private:
ElfFile(File* file, bool writable, bool program_header_only);
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index a0e35f88fa..cf89850fa2 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -211,7 +211,7 @@ void ThrowStackOverflowError(Thread* self) {
}
bool explicit_overflow_check = Runtime::Current()->ExplicitStackOverflowChecks();
- self->ResetDefaultStackEnd(!explicit_overflow_check); // Return to default stack size.
+ self->ResetDefaultStackEnd(); // Return to default stack size.
// And restore protection if implicit checks are on.
if (!explicit_overflow_check) {
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 4730701f2c..dfd2e11fc2 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -1699,7 +1699,15 @@ extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, jvalue result,
artQuickGenericJniEndJNINonRef(self, cookie, lock);
switch (return_shorty_char) {
- case 'F': // Fall-through.
+ case 'F': {
+ if (kRuntimeISA == kX86) {
+ // Convert back the result to float.
+ double d = bit_cast<uint64_t, double>(result_f);
+ return bit_cast<float, uint32_t>(static_cast<float>(d));
+ } else {
+ return result_f;
+ }
+ }
case 'D':
return result_f;
case 'Z':
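
On x86 the generic JNI end path hands back the floating-point result as a 64-bit pattern holding a double, so an 'F' return must be narrowed to float and its raw 32 bits returned; that is what the two bit_cast calls above do. Spelled out with a memcpy-based bit_cast (sketch; ART uses its own bit_cast helper):

    #include <cstdint>
    #include <cstring>

    template <typename To, typename From>
    static To BitCastSketch(From from) {
      static_assert(sizeof(To) == sizeof(From), "sizes must match");
      To to;
      memcpy(&to, &from, sizeof(to));
      return to;
    }

    static uint64_t NarrowFloatResult(uint64_t result_f) {
      double d = BitCastSketch<double>(result_f);  // Reinterpret the stored bits.
      float f = static_cast<float>(d);             // Narrow the value.
      return BitCastSketch<uint32_t>(f);           // Return the raw float bits.
    }
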
diff --git a/runtime/gc/accounting/card_table-inl.h b/runtime/gc/accounting/card_table-inl.h
index 217360f21d..3b06f74d72 100644
--- a/runtime/gc/accounting/card_table-inl.h
+++ b/runtime/gc/accounting/card_table-inl.h
@@ -55,7 +55,7 @@ inline size_t CardTable::Scan(ContinuousSpaceBitmap* bitmap, byte* scan_begin, b
// scan_end is the byte after the last byte we scan.
DCHECK_LE(scan_end, reinterpret_cast<byte*>(bitmap->HeapLimit()));
byte* card_cur = CardFromAddr(scan_begin);
- byte* card_end = CardFromAddr(scan_end);
+ byte* card_end = CardFromAddr(AlignUp(scan_end, kCardSize));
CheckCardValid(card_cur);
CheckCardValid(card_end);
size_t cards_scanned = 0;
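
Without the AlignUp, CardFromAddr(scan_end) rounds down, so card_end would point at the card containing scan_end rather than one past it, cutting the scan short of the final partial card. For a power-of-two kCardSize the round-up is the usual mask trick (sketch; ART has its own AlignUp utility):

    // Round x up to the next multiple of n, where n is a power of two.
    static inline uintptr_t AlignUpSketch(uintptr_t x, uintptr_t n) {
      return (x + n - 1) & ~(n - 1);
    }
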
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 2686af0529..3acf80d8cf 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -72,9 +72,11 @@ class ModUnionClearCardVisitor {
class ModUnionUpdateObjectReferencesVisitor {
public:
- ModUnionUpdateObjectReferencesVisitor(MarkHeapReferenceCallback* callback, void* arg)
- : callback_(callback),
- arg_(arg) {
+ ModUnionUpdateObjectReferencesVisitor(MarkHeapReferenceCallback* callback, void* arg,
+ space::ContinuousSpace* from_space,
+ bool* contains_reference_to_other_space)
+ : callback_(callback), arg_(arg), from_space_(from_space),
+ contains_reference_to_other_space_(contains_reference_to_other_space) {
}
// Extra parameters are required since we use this same visitor signature for checking objects.
@@ -82,7 +84,9 @@ class ModUnionUpdateObjectReferencesVisitor {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Only add the reference if it is non null and fits our criteria.
mirror::HeapReference<Object>* obj_ptr = obj->GetFieldObjectReferenceAddr(offset);
- if (obj_ptr->AsMirrorPtr() != nullptr) {
+ mirror::Object* ref = obj_ptr->AsMirrorPtr();
+ if (ref != nullptr && !from_space_->HasAddress(ref)) {
+ *contains_reference_to_other_space_ = true;
callback_(obj_ptr, arg_);
}
}
@@ -90,24 +94,36 @@ class ModUnionUpdateObjectReferencesVisitor {
private:
MarkHeapReferenceCallback* const callback_;
void* arg_;
+ // Space which we are scanning.
+ space::ContinuousSpace* const from_space_;
+ // Set if we have any references to another space.
+ bool* const contains_reference_to_other_space_;
};
class ModUnionScanImageRootVisitor {
public:
- ModUnionScanImageRootVisitor(MarkHeapReferenceCallback* callback, void* arg)
- : callback_(callback), arg_(arg) {}
+ ModUnionScanImageRootVisitor(MarkHeapReferenceCallback* callback, void* arg,
+ space::ContinuousSpace* from_space,
+ bool* contains_reference_to_other_space)
+ : callback_(callback), arg_(arg), from_space_(from_space),
+ contains_reference_to_other_space_(contains_reference_to_other_space) {}
void operator()(Object* root) const
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(root != NULL);
- ModUnionUpdateObjectReferencesVisitor ref_visitor(callback_, arg_);
+ ModUnionUpdateObjectReferencesVisitor ref_visitor(callback_, arg_, from_space_,
+ contains_reference_to_other_space_);
root->VisitReferences<kMovingClasses>(ref_visitor, VoidFunctor());
}
private:
MarkHeapReferenceCallback* const callback_;
void* const arg_;
+ // Space which we are scanning.
+ space::ContinuousSpace* const from_space_;
+ // Set if we have any references to another space.
+ bool* const contains_reference_to_other_space_;
};
void ModUnionTableReferenceCache::ClearCards() {
@@ -313,12 +329,20 @@ void ModUnionTableCardCache::ClearCards() {
void ModUnionTableCardCache::UpdateAndMarkReferences(MarkHeapReferenceCallback* callback,
void* arg) {
CardTable* card_table = heap_->GetCardTable();
- ModUnionScanImageRootVisitor scan_visitor(callback, arg);
ContinuousSpaceBitmap* bitmap = space_->GetLiveBitmap();
- for (const byte* card_addr : cleared_cards_) {
- uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card_addr));
+ bool reference_to_other_space = false;
+ ModUnionScanImageRootVisitor scan_visitor(callback, arg, space_, &reference_to_other_space);
+ for (auto it = cleared_cards_.begin(), end = cleared_cards_.end(); it != end; ) {
+ uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(*it));
DCHECK(space_->HasAddress(reinterpret_cast<Object*>(start)));
+ reference_to_other_space = false;
bitmap->VisitMarkedRange(start, start + CardTable::kCardSize, scan_visitor);
+ if (!reference_to_other_space) {
+ // No non-null reference to another space; remove the card.
+ it = cleared_cards_.erase(it);
+ } else {
+ ++it;
+ }
}
}
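
The pruning loop above uses the standard erase-while-iterating idiom for ordered associative containers: erase() returns the iterator to the successor, and only iterators to erased elements are invalidated. Distilled into a generic sketch over std::set:

    #include <cstdint>
    #include <set>

    // Keep only the elements satisfying `keep`, erasing the rest in place.
    template <typename Pred>
    void PruneCards(std::set<const uint8_t*>* cards, Pred keep) {
      for (auto it = cards->begin(); it != cards->end(); ) {
        if (keep(*it)) {
          ++it;
        } else {
          it = cards->erase(it);  // erase() hands back the next iterator.
        }
      }
    }
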
@@ -333,6 +357,17 @@ void ModUnionTableCardCache::Dump(std::ostream& os) {
os << "]";
}
+void ModUnionTableCardCache::SetCards() {
+ CardTable* card_table = heap_->GetCardTable();
+ for (byte* addr = space_->Begin(); addr < AlignUp(space_->End(), CardTable::kCardSize);
+ addr += CardTable::kCardSize) {
+ cleared_cards_.insert(card_table->CardFromAddr(addr));
+ }
+}
+
+void ModUnionTableReferenceCache::SetCards() {
+}
+
} // namespace accounting
} // namespace gc
} // namespace art
diff --git a/runtime/gc/accounting/mod_union_table.h b/runtime/gc/accounting/mod_union_table.h
index 449e171b64..f67dc274f9 100644
--- a/runtime/gc/accounting/mod_union_table.h
+++ b/runtime/gc/accounting/mod_union_table.h
@@ -65,6 +65,9 @@ class ModUnionTable {
// determining references to track.
virtual void ClearCards() = 0;
+ // Set all the cards.
+ virtual void SetCards() = 0;
+
// Update the mod-union table using data stored by ClearCards. There may be multiple ClearCards
// before a call to update, for example, back-to-back sticky GCs. Also mark references to other
// spaces which are stored in the mod-union table.
@@ -120,6 +123,8 @@ class ModUnionTableReferenceCache : public ModUnionTable {
void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetCards() OVERRIDE;
+
protected:
// Cleared card array, used to update the mod-union table.
ModUnionTable::CardSet cleared_cards_;
@@ -150,6 +155,8 @@ class ModUnionTableCardCache : public ModUnionTable {
void Dump(std::ostream& os);
+ void SetCards() OVERRIDE;
+
protected:
// Cleared card array, used to update the mod-union table.
CardSet cleared_cards_;
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 7d3fd2d23a..d1fb60061e 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -84,7 +84,9 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Clas
&klass);
if (obj == nullptr) {
bool after_is_current_allocator = allocator == GetCurrentAllocator();
- if (is_current_allocator && !after_is_current_allocator) {
+ // If there is a pending exception, fail the allocation right away since the next one
+ // could cause OOM and abort the runtime.
+ if (!self->IsExceptionPending() && is_current_allocator && !after_is_current_allocator) {
// If the allocator changed, we need to restart the allocation.
return AllocObject<kInstrumented>(self, klass, byte_count, pre_fence_visitor);
}
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 5d138d231c..f0b7685acf 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -128,8 +128,8 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
long_gc_log_threshold_(long_gc_log_threshold),
ignore_max_footprint_(ignore_max_footprint),
zygote_creation_lock_("zygote creation lock", kZygoteCreationLock),
- have_zygote_space_(false),
- large_object_threshold_(std::numeric_limits<size_t>::max()), // Starts out disabled.
+ zygote_space_(nullptr),
+ large_object_threshold_(kDefaultLargeObjectThreshold), // Starts out disabled.
collector_type_running_(kCollectorTypeNone),
last_gc_type_(collector::kGcTypeNone),
next_gc_type_(collector::kGcTypePartial),
@@ -190,7 +190,6 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
// If we aren't the zygote, switch to the default non zygote allocator. This may update the
// entrypoints.
if (!Runtime::Current()->IsZygote()) {
- large_object_threshold_ = kDefaultLargeObjectThreshold;
// Background compaction is currently not supported for command line runs.
if (background_collector_type_ != foreground_collector_type_) {
VLOG(heap) << "Disabling background compaction for non zygote";
@@ -468,7 +467,7 @@ void Heap::CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t gr
// After the zygote we want this to be false if we don't have background compaction enabled so
// that getting primitive array elements is faster.
// We never have homogeneous compaction with GSS and don't need a space with movable objects.
- can_move_objects = !have_zygote_space_ && foreground_collector_type_ != kCollectorTypeGSS;
+ can_move_objects = !HasZygoteSpace() && foreground_collector_type_ != kCollectorTypeGSS;
}
if (collector::SemiSpace::kUseRememberedSet && main_space_ != nullptr) {
RemoveRememberedSet(main_space_);
@@ -801,6 +800,9 @@ void Heap::DumpGcPerformanceInfo(std::ostream& os) {
os << "Mean allocation time: " << PrettyDuration(allocation_time / total_objects_allocated)
<< "\n";
}
+ if (HasZygoteSpace()) {
+ os << "Zygote space size " << PrettySize(zygote_space_->Size()) << "\n";
+ }
os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
os << "Approximate GC data structures memory overhead: " << gc_memory_overhead_.LoadRelaxed();
@@ -1823,7 +1825,8 @@ void Heap::PreZygoteFork() {
Thread* self = Thread::Current();
MutexLock mu(self, zygote_creation_lock_);
// Try to see if we have any Zygote spaces.
- if (have_zygote_space_) {
+ if (HasZygoteSpace()) {
+ LOG(WARNING) << __FUNCTION__ << " called when we already have a zygote space.";
return;
}
VLOG(heap) << "Starting PreZygoteFork";
@@ -1897,26 +1900,26 @@ void Heap::PreZygoteFork() {
// from this point on.
RemoveRememberedSet(old_alloc_space);
}
- space::ZygoteSpace* zygote_space = old_alloc_space->CreateZygoteSpace("alloc space",
- low_memory_mode_,
- &non_moving_space_);
+ zygote_space_ = old_alloc_space->CreateZygoteSpace("alloc space", low_memory_mode_,
+ &non_moving_space_);
CHECK(!non_moving_space_->CanMoveObjects());
if (same_space) {
main_space_ = non_moving_space_;
SetSpaceAsDefault(main_space_);
}
delete old_alloc_space;
- CHECK(zygote_space != nullptr) << "Failed creating zygote space";
- AddSpace(zygote_space);
+ CHECK(HasZygoteSpace()) << "Failed creating zygote space";
+ AddSpace(zygote_space_);
non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
AddSpace(non_moving_space_);
- have_zygote_space_ = true;
- // Enable large object space allocations.
- large_object_threshold_ = kDefaultLargeObjectThreshold;
// Create the zygote space mod union table.
accounting::ModUnionTable* mod_union_table =
- new accounting::ModUnionTableCardCache("zygote space mod-union table", this, zygote_space);
+ new accounting::ModUnionTableCardCache("zygote space mod-union table", this,
+ zygote_space_);
CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table";
+ // Set all the cards in the mod-union table since we don't know which objects contain references
+ // to large objects.
+ mod_union_table->SetCards();
AddModUnionTable(mod_union_table);
if (collector::SemiSpace::kUseRememberedSet) {
// Add a new remembered set for the post-zygote non-moving space.
@@ -1986,7 +1989,7 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus
// If the heap can't run the GC, silently fail and return that no GC was run.
switch (gc_type) {
case collector::kGcTypePartial: {
- if (!have_zygote_space_) {
+ if (!HasZygoteSpace()) {
return collector::kGcTypeNone;
}
break;
@@ -2483,7 +2486,6 @@ class VerifyLiveStackReferences {
bool Heap::VerifyMissingCardMarks() {
Thread* self = Thread::Current();
Locks::mutator_lock_->AssertExclusiveHeld(self);
-
// We need to sort the live stack since we binary search it.
live_stack_->Sort();
// Since we sorted the allocation stack content, need to revoke all
@@ -2491,7 +2493,6 @@ bool Heap::VerifyMissingCardMarks() {
RevokeAllThreadLocalAllocationStacks(self);
VerifyLiveStackReferences visitor(this);
GetLiveBitmap()->Visit(visitor);
-
// We can verify objects in the live stack since none of these should reference dead objects.
for (mirror::Object** it = live_stack_->Begin(); it != live_stack_->End(); ++it) {
if (!kUseThreadLocalAllocationStack || *it != nullptr) {
@@ -2689,7 +2690,7 @@ void Heap::PostGcVerificationPaused(collector::GarbageCollector* gc) {
void Heap::PostGcVerification(collector::GarbageCollector* gc) {
if (verify_system_weaks_ || verify_post_gc_rosalloc_ || verify_post_gc_heap_) {
collector::GarbageCollector::ScopedPause pause(gc);
- PreGcVerificationPaused(gc);
+ PostGcVerificationPaused(gc);
}
}
@@ -2812,7 +2813,7 @@ void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran) {
next_gc_type_ = collector::kGcTypeSticky;
} else {
collector::GcType non_sticky_gc_type =
- have_zygote_space_ ? collector::kGcTypePartial : collector::kGcTypeFull;
+ HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
// Find what the next non sticky collector will be.
collector::GarbageCollector* non_sticky_collector = FindCollectorByGcType(non_sticky_gc_type);
// If the throughput of the current sticky GC >= throughput of the non sticky collector, then
@@ -3035,7 +3036,7 @@ void Heap::RegisterNativeAllocation(JNIEnv* env, int bytes) {
size_t new_native_bytes_allocated = native_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes);
new_native_bytes_allocated += bytes;
if (new_native_bytes_allocated > native_footprint_gc_watermark_) {
- collector::GcType gc_type = have_zygote_space_ ? collector::kGcTypePartial :
+ collector::GcType gc_type = HasZygoteSpace() ? collector::kGcTypePartial :
collector::kGcTypeFull;
// The second watermark is higher than the gc watermark. If you hit this it means you are
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index d5b49d81cf..ed93ad977d 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -79,6 +79,7 @@ namespace allocator {
namespace space {
class AllocSpace;
class BumpPointerSpace;
+ class ContinuousMemMapAllocSpace;
class DiscontinuousSpace;
class DlMallocSpace;
class ImageSpace;
@@ -87,7 +88,7 @@ namespace space {
class RosAllocSpace;
class Space;
class SpaceTest;
- class ContinuousMemMapAllocSpace;
+ class ZygoteSpace;
} // namespace space
class AgeCardVisitor {
@@ -599,6 +600,10 @@ class Heap {
return &reference_processor_;
}
+ bool HasZygoteSpace() const {
+ return zygote_space_ != nullptr;
+ }
+
private:
// Compact source space to target space.
void Compact(space::ContinuousMemMapAllocSpace* target_space,
@@ -849,8 +854,9 @@ class Heap {
// Lock which guards zygote space creation.
Mutex zygote_creation_lock_;
- // If we have a zygote space.
- bool have_zygote_space_;
+ // Non-null iff we have a zygote space. Doesn't contain the large objects allocated before
+ // zygote space creation.
+ space::ZygoteSpace* zygote_space_;
// Minimum allocation size of large object.
size_t large_object_threshold_;
diff --git a/runtime/instruction_set.cc b/runtime/instruction_set.cc
index d8a38f42d7..644e0556b1 100644
--- a/runtime/instruction_set.cc
+++ b/runtime/instruction_set.cc
@@ -42,21 +42,18 @@ const char* GetInstructionSetString(const InstructionSet isa) {
InstructionSet GetInstructionSetFromString(const char* isa_str) {
CHECK(isa_str != nullptr);
- if (!strcmp("arm", isa_str)) {
+ if (strcmp("arm", isa_str) == 0) {
return kArm;
- } else if (!strcmp("arm64", isa_str)) {
+ } else if (strcmp("arm64", isa_str) == 0) {
return kArm64;
- } else if (!strcmp("x86", isa_str)) {
+ } else if (strcmp("x86", isa_str) == 0) {
return kX86;
- } else if (!strcmp("x86_64", isa_str)) {
+ } else if (strcmp("x86_64", isa_str) == 0) {
return kX86_64;
- } else if (!strcmp("mips", isa_str)) {
+ } else if (strcmp("mips", isa_str) == 0) {
return kMips;
- } else if (!strcmp("none", isa_str)) {
- return kNone;
}
- LOG(FATAL) << "Unknown ISA " << isa_str;
return kNone;
}
diff --git a/runtime/instruction_set.h b/runtime/instruction_set.h
index f212811e32..ae8eeac54d 100644
--- a/runtime/instruction_set.h
+++ b/runtime/instruction_set.h
@@ -75,6 +75,8 @@ static constexpr size_t kX86Alignment = 16;
const char* GetInstructionSetString(InstructionSet isa);
+
+// Note: Returns kNone when the string cannot be parsed to a known value.
InstructionSet GetInstructionSetFromString(const char* instruction_set);
static inline size_t GetInstructionSetPointerSize(InstructionSet isa) {
diff --git a/runtime/instruction_set_test.cc b/runtime/instruction_set_test.cc
index ece32386d5..ac17c4f0d4 100644
--- a/runtime/instruction_set_test.cc
+++ b/runtime/instruction_set_test.cc
@@ -29,6 +29,7 @@ TEST_F(InstructionSetTest, GetInstructionSetFromString) {
EXPECT_EQ(kX86_64, GetInstructionSetFromString("x86_64"));
EXPECT_EQ(kMips, GetInstructionSetFromString("mips"));
EXPECT_EQ(kNone, GetInstructionSetFromString("none"));
+ EXPECT_EQ(kNone, GetInstructionSetFromString("random-string"));
}
TEST_F(InstructionSetTest, GetInstructionSetString) {
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index ae4228487c..0f45b9e512 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -121,6 +121,11 @@ void Instrumentation::InstallStubsForMethod(mirror::ArtMethod* method) {
// Do not change stubs for these methods.
return;
}
+ // Don't stub Proxy.<init>. Note that the Proxy class itself is not a proxy class.
+ if (method->IsConstructor() &&
+ method->GetDeclaringClass()->DescriptorEquals("Ljava/lang/reflect/Proxy;")) {
+ return;
+ }
const void* new_portable_code;
const void* new_quick_code;
bool uninstall = !entry_exit_stubs_installed_ && !interpreter_stubs_installed_;
@@ -205,7 +210,8 @@ static void InstrumentationInstallStack(Thread* thread, void* arg)
LOG(INFO) << " Handling quick to interpreter transition. Frame " << GetFrameId();
}
CHECK_LT(instrumentation_stack_depth_, instrumentation_stack_->size());
- const InstrumentationStackFrame& frame = instrumentation_stack_->at(instrumentation_stack_depth_);
+ const InstrumentationStackFrame& frame =
+ instrumentation_stack_->at(instrumentation_stack_depth_);
CHECK(frame.interpreter_entry_);
// This is an interpreter frame so method enter event must have been reported. However we
// need to push a DEX pc into the dex_pcs_ list to match size of instrumentation stack.
@@ -231,7 +237,8 @@ static void InstrumentationInstallStack(Thread* thread, void* arg)
reached_existing_instrumentation_frames_ = true;
CHECK_LT(instrumentation_stack_depth_, instrumentation_stack_->size());
- const InstrumentationStackFrame& frame = instrumentation_stack_->at(instrumentation_stack_depth_);
+ const InstrumentationStackFrame& frame =
+ instrumentation_stack_->at(instrumentation_stack_depth_);
CHECK_EQ(m, frame.method_) << "Expected " << PrettyMethod(m)
<< ", Found " << PrettyMethod(frame.method_);
return_pc = frame.return_pc_;
@@ -324,7 +331,8 @@ static void InstrumentationRestoreStack(Thread* thread, void* arg)
mirror::ArtMethod* m = GetMethod();
if (GetCurrentQuickFrame() == NULL) {
if (kVerboseInstrumentation) {
- LOG(INFO) << " Ignoring a shadow frame. Frame " << GetFrameId() << " Method=" << PrettyMethod(m);
+ LOG(INFO) << " Ignoring a shadow frame. Frame " << GetFrameId()
+ << " Method=" << PrettyMethod(m);
}
return true; // Ignore shadow frames.
}
@@ -405,19 +413,47 @@ void Instrumentation::AddListener(InstrumentationListener* listener, uint32_t ev
have_method_unwind_listeners_ = true;
}
if ((events & kDexPcMoved) != 0) {
- dex_pc_listeners_.push_back(listener);
+ std::list<InstrumentationListener*>* modified;
+ if (have_dex_pc_listeners_) {
+ modified = new std::list<InstrumentationListener*>(*dex_pc_listeners_.get());
+ } else {
+ modified = new std::list<InstrumentationListener*>();
+ }
+ modified->push_back(listener);
+ dex_pc_listeners_.reset(modified);
have_dex_pc_listeners_ = true;
}
if ((events & kFieldRead) != 0) {
- field_read_listeners_.push_back(listener);
+ std::list<InstrumentationListener*>* modified;
+ if (have_field_read_listeners_) {
+ modified = new std::list<InstrumentationListener*>(*field_read_listeners_.get());
+ } else {
+ modified = new std::list<InstrumentationListener*>();
+ }
+ modified->push_back(listener);
+ field_read_listeners_.reset(modified);
have_field_read_listeners_ = true;
}
if ((events & kFieldWritten) != 0) {
- field_write_listeners_.push_back(listener);
+ std::list<InstrumentationListener*>* modified;
+ if (have_field_write_listeners_) {
+ modified = new std::list<InstrumentationListener*>(*field_write_listeners_.get());
+ } else {
+ modified = new std::list<InstrumentationListener*>();
+ }
+ modified->push_back(listener);
+ field_write_listeners_.reset(modified);
have_field_write_listeners_ = true;
}
if ((events & kExceptionCaught) != 0) {
- exception_caught_listeners_.push_back(listener);
+ std::list<InstrumentationListener*>* modified;
+ if (have_exception_caught_listeners_) {
+ modified = new std::list<InstrumentationListener*>(*exception_caught_listeners_.get());
+ } else {
+ modified = new std::list<InstrumentationListener*>();
+ }
+ modified->push_back(listener);
+ exception_caught_listeners_.reset(modified);
have_exception_caught_listeners_ = true;
}
UpdateInterpreterHandlerTable();
@@ -427,51 +463,78 @@ void Instrumentation::RemoveListener(InstrumentationListener* listener, uint32_t
Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
if ((events & kMethodEntered) != 0) {
- bool contains = std::find(method_entry_listeners_.begin(), method_entry_listeners_.end(),
- listener) != method_entry_listeners_.end();
- if (contains) {
+ if (have_method_entry_listeners_) {
method_entry_listeners_.remove(listener);
+ have_method_entry_listeners_ = !method_entry_listeners_.empty();
}
- have_method_entry_listeners_ = method_entry_listeners_.size() > 0;
}
if ((events & kMethodExited) != 0) {
- bool contains = std::find(method_exit_listeners_.begin(), method_exit_listeners_.end(),
- listener) != method_exit_listeners_.end();
- if (contains) {
+ if (have_method_exit_listeners_) {
method_exit_listeners_.remove(listener);
+ have_method_exit_listeners_ = !method_exit_listeners_.empty();
}
- have_method_exit_listeners_ = method_exit_listeners_.size() > 0;
}
if ((events & kMethodUnwind) != 0) {
- method_unwind_listeners_.remove(listener);
+ if (have_method_unwind_listeners_) {
+ method_unwind_listeners_.remove(listener);
+ have_method_unwind_listeners_ = !method_unwind_listeners_.empty();
+ }
}
if ((events & kDexPcMoved) != 0) {
- bool contains = std::find(dex_pc_listeners_.begin(), dex_pc_listeners_.end(),
- listener) != dex_pc_listeners_.end();
- if (contains) {
- dex_pc_listeners_.remove(listener);
+ if (have_dex_pc_listeners_) {
+ std::list<InstrumentationListener*>* modified =
+ new std::list<InstrumentationListener*>(*dex_pc_listeners_.get());
+ modified->remove(listener);
+ have_dex_pc_listeners_ = !modified->empty();
+ if (have_dex_pc_listeners_) {
+ dex_pc_listeners_.reset(modified);
+ } else {
+ dex_pc_listeners_.reset();
+ delete modified;
+ }
}
- have_dex_pc_listeners_ = dex_pc_listeners_.size() > 0;
}
if ((events & kFieldRead) != 0) {
- bool contains = std::find(field_read_listeners_.begin(), field_read_listeners_.end(),
- listener) != field_read_listeners_.end();
- if (contains) {
- field_read_listeners_.remove(listener);
+ if (have_field_read_listeners_) {
+ std::list<InstrumentationListener*>* modified =
+ new std::list<InstrumentationListener*>(*field_read_listeners_.get());
+ modified->remove(listener);
+ have_field_read_listeners_ = !modified->empty();
+ if (have_field_read_listeners_) {
+ field_read_listeners_.reset(modified);
+ } else {
+ field_read_listeners_.reset();
+ delete modified;
+ }
}
- have_field_read_listeners_ = field_read_listeners_.size() > 0;
}
if ((events & kFieldWritten) != 0) {
- bool contains = std::find(field_write_listeners_.begin(), field_write_listeners_.end(),
- listener) != field_write_listeners_.end();
- if (contains) {
- field_write_listeners_.remove(listener);
+ if (have_field_write_listeners_) {
+ std::list<InstrumentationListener*>* modified =
+ new std::list<InstrumentationListener*>(*field_write_listeners_.get());
+ modified->remove(listener);
+ have_field_write_listeners_ = !modified->empty();
+ if (have_field_write_listeners_) {
+ field_write_listeners_.reset(modified);
+ } else {
+ field_write_listeners_.reset();
+ delete modified;
+ }
}
- have_field_write_listeners_ = field_write_listeners_.size() > 0;
}
if ((events & kExceptionCaught) != 0) {
- exception_caught_listeners_.remove(listener);
- have_exception_caught_listeners_ = exception_caught_listeners_.size() > 0;
+ if (have_exception_caught_listeners_) {
+ std::list<InstrumentationListener*>* modified =
+ new std::list<InstrumentationListener*>(*exception_caught_listeners_.get());
+ modified->remove(listener);
+ have_exception_caught_listeners_ = !modified->empty();
+ if (have_exception_caught_listeners_) {
+ exception_caught_listeners_.reset(modified);
+ } else {
+ exception_caught_listeners_.reset();
+ delete modified;
+ }
+ }
}
UpdateInterpreterHandlerTable();
}
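
Taken with the dispatch changes below, AddListener/RemoveListener now form a copy-on-write scheme: writers build a fresh std::list and publish it through the shared_ptr, while each event dispatcher pins the current list with its own shared_ptr, so an in-flight iteration survives a listener removing itself — the case the old "copy the list" workaround papered over. Reduced to its essentials (generic sketch, not the ART types; writers are still serialized externally, here by the mutator lock):

    #include <list>
    #include <memory>

    template <typename T>
    class CowListenerList {
     public:
      void Add(T listener) {
        auto modified = snapshot_ != nullptr
            ? std::make_shared<std::list<T>>(*snapshot_)
            : std::make_shared<std::list<T>>();
        modified->push_back(listener);
        snapshot_ = modified;  // Readers holding the old list are unaffected.
      }
      template <typename Fn>
      void Dispatch(Fn fn) {
        std::shared_ptr<std::list<T>> pinned(snapshot_);  // Pin a snapshot.
        if (pinned != nullptr) {
          for (T listener : *pinned) {
            fn(listener);  // Safe even if fn() triggers Add/Remove.
          }
        }
      }
     private:
      std::shared_ptr<std::list<T>> snapshot_;
    };
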
@@ -684,7 +747,8 @@ void Instrumentation::Deoptimize(mirror::ArtMethod* method) {
{
WriterMutexLock mu(self, deoptimized_methods_lock_);
bool has_not_been_deoptimized = AddDeoptimizedMethod(method);
- CHECK(has_not_been_deoptimized) << "Method " << PrettyMethod(method) << " is already deoptimized";
+ CHECK(has_not_been_deoptimized) << "Method " << PrettyMethod(method)
+ << " is already deoptimized";
}
if (!interpreter_stubs_installed_) {
UpdateEntrypoints(method, GetQuickInstrumentationEntryPoint(), GetPortableToInterpreterBridge(),
@@ -853,33 +917,33 @@ void Instrumentation::MethodUnwindEvent(Thread* thread, mirror::Object* this_obj
void Instrumentation::DexPcMovedEventImpl(Thread* thread, mirror::Object* this_object,
mirror::ArtMethod* method,
uint32_t dex_pc) const {
- // TODO: STL copy-on-write collection? The copy below is due to the debug listener having an
- // action where it can remove itself as a listener and break the iterator. The copy only works
- // around the problem and in general we may have to move to something like reference counting to
- // ensure listeners are deleted correctly.
- std::list<InstrumentationListener*> copy(dex_pc_listeners_);
- for (InstrumentationListener* listener : copy) {
- listener->DexPcMoved(thread, this_object, method, dex_pc);
+ if (HasDexPcListeners()) {
+ std::shared_ptr<std::list<InstrumentationListener*>> original(dex_pc_listeners_);
+ for (InstrumentationListener* listener : *original.get()) {
+ listener->DexPcMoved(thread, this_object, method, dex_pc);
+ }
}
}
void Instrumentation::FieldReadEventImpl(Thread* thread, mirror::Object* this_object,
mirror::ArtMethod* method, uint32_t dex_pc,
mirror::ArtField* field) const {
- // TODO: same comment than DexPcMovedEventImpl.
- std::list<InstrumentationListener*> copy(field_read_listeners_);
- for (InstrumentationListener* listener : copy) {
- listener->FieldRead(thread, this_object, method, dex_pc, field);
+ if (HasFieldReadListeners()) {
+ std::shared_ptr<std::list<InstrumentationListener*>> original(field_read_listeners_);
+ for (InstrumentationListener* listener : *original.get()) {
+ listener->FieldRead(thread, this_object, method, dex_pc, field);
+ }
}
}
void Instrumentation::FieldWriteEventImpl(Thread* thread, mirror::Object* this_object,
mirror::ArtMethod* method, uint32_t dex_pc,
mirror::ArtField* field, const JValue& field_value) const {
- // TODO: same comment than DexPcMovedEventImpl.
- std::list<InstrumentationListener*> copy(field_write_listeners_);
- for (InstrumentationListener* listener : copy) {
- listener->FieldWritten(thread, this_object, method, dex_pc, field, field_value);
+ if (HasFieldWriteListeners()) {
+ std::shared_ptr<std::list<InstrumentationListener*>> original(field_write_listeners_);
+ for (InstrumentationListener* listener : *original.get()) {
+ listener->FieldWritten(thread, this_object, method, dex_pc, field, field_value);
+ }
}
}
@@ -891,11 +955,10 @@ void Instrumentation::ExceptionCaughtEvent(Thread* thread, const ThrowLocation&
DCHECK_EQ(thread->GetException(nullptr), exception_object);
bool is_exception_reported = thread->IsExceptionReportedToInstrumentation();
thread->ClearException();
- // TODO: The copy below is due to the debug listener having an action where it can remove
- // itself as a listener and break the iterator. The copy only works around the problem.
- std::list<InstrumentationListener*> copy(exception_caught_listeners_);
- for (InstrumentationListener* listener : copy) {
- listener->ExceptionCaught(thread, throw_location, catch_method, catch_dex_pc, exception_object);
+ std::shared_ptr<std::list<InstrumentationListener*>> original(exception_caught_listeners_);
+ for (InstrumentationListener* listener : *original.get()) {
+ listener->ExceptionCaught(thread, throw_location, catch_method, catch_dex_pc,
+ exception_object);
}
thread->SetException(throw_location, exception_object);
thread->SetExceptionReportedToInstrumentation(is_exception_reported);
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 66c6b388d4..21d11a5e1c 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -433,10 +433,14 @@ class Instrumentation {
std::list<InstrumentationListener*> method_entry_listeners_ GUARDED_BY(Locks::mutator_lock_);
std::list<InstrumentationListener*> method_exit_listeners_ GUARDED_BY(Locks::mutator_lock_);
std::list<InstrumentationListener*> method_unwind_listeners_ GUARDED_BY(Locks::mutator_lock_);
- std::list<InstrumentationListener*> dex_pc_listeners_ GUARDED_BY(Locks::mutator_lock_);
- std::list<InstrumentationListener*> field_read_listeners_ GUARDED_BY(Locks::mutator_lock_);
- std::list<InstrumentationListener*> field_write_listeners_ GUARDED_BY(Locks::mutator_lock_);
- std::list<InstrumentationListener*> exception_caught_listeners_ GUARDED_BY(Locks::mutator_lock_);
+ std::shared_ptr<std::list<InstrumentationListener*>> dex_pc_listeners_
+ GUARDED_BY(Locks::mutator_lock_);
+ std::shared_ptr<std::list<InstrumentationListener*>> field_read_listeners_
+ GUARDED_BY(Locks::mutator_lock_);
+ std::shared_ptr<std::list<InstrumentationListener*>> field_write_listeners_
+ GUARDED_BY(Locks::mutator_lock_);
+ std::shared_ptr<std::list<InstrumentationListener*>> exception_caught_listeners_
+ GUARDED_BY(Locks::mutator_lock_);
// The set of methods being deoptimized (by the debugger) which must be executed with interpreter
// only.
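The shared_ptr swap above is a copy-on-write listener list: dispatch pins the current list and iterates it, while (un)registration installs a freshly copied list, so a listener that removes itself mid-event (as the debug listener does) never invalidates a live iterator. A minimal sketch of the pattern, with hypothetical names and external locking assumed, as with the GUARDED_BY(Locks::mutator_lock_) annotations above:

  #include <list>
  #include <memory>

  struct Listener {
    virtual ~Listener() {}
    virtual void OnEvent() = 0;
  };

  class Dispatcher {
   public:
    void Add(Listener* l) {
      // Copy-on-write: never mutate a list that a dispatcher may be iterating.
      auto copy = listeners_ != nullptr
          ? std::make_shared<std::list<Listener*>>(*listeners_)
          : std::make_shared<std::list<Listener*>>();
      copy->push_back(l);
      listeners_ = copy;  // Old readers keep the previous list alive via their shared_ptr.
    }
    void Dispatch() {
      std::shared_ptr<std::list<Listener*>> current(listeners_);  // Pin this snapshot.
      if (current != nullptr) {
        for (Listener* listener : *current) {
          listener->OnEvent();  // May call Add() without breaking this loop.
        }
      }
    }
   private:
    std::shared_ptr<std::list<Listener*>> listeners_;
  };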
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index 9eab3fde13..e085ac245d 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -25,7 +25,7 @@
#include "mirror/art_method.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
-#include "native_bridge.h"
+#include "nativebridge/native_bridge.h"
#include "java_vm_ext.h"
#include "parsed_options.h"
#include "ScopedLocalRef.h"
@@ -135,7 +135,7 @@ class SharedLibrary {
CHECK(NeedsNativeBridge());
uint32_t len = 0;
- return NativeBridgeGetTrampoline(handle_, symbol_name.c_str(), shorty, len);
+ return android::NativeBridgeGetTrampoline(handle_, symbol_name.c_str(), shorty, len);
}
private:
@@ -645,8 +645,8 @@ bool JavaVMExt::LoadNativeLibrary(JNIEnv* env, const std::string& path, jobject
void* handle = dlopen(path_str, RTLD_LAZY);
bool needs_native_bridge = false;
if (handle == nullptr) {
- if (NativeBridgeIsSupported(path_str)) {
- handle = NativeBridgeLoadLibrary(path_str, RTLD_LAZY);
+ if (android::NativeBridgeIsSupported(path_str)) {
+ handle = android::NativeBridgeLoadLibrary(path_str, RTLD_LAZY);
needs_native_bridge = true;
}
}
diff --git a/runtime/jdwp/jdwp_main.cc b/runtime/jdwp/jdwp_main.cc
index 7795b7c5df..4155c82699 100644
--- a/runtime/jdwp/jdwp_main.cc
+++ b/runtime/jdwp/jdwp_main.cc
@@ -283,7 +283,9 @@ JdwpState* JdwpState::Create(const JdwpOptions* options) {
{
ScopedThreadStateChange tsc(self, kWaitingForDebuggerToAttach);
MutexLock attach_locker(self, state->attach_lock_);
- state->attach_cond_.Wait(self);
+ while (state->debug_thread_id_ == 0) {
+ state->attach_cond_.Wait(self);
+ }
}
if (!state->IsActive()) {
LOG(ERROR) << "JDWP connection failed";
@@ -335,10 +337,6 @@ void JdwpState::ResetState() {
*/
JdwpState::~JdwpState() {
if (netState != NULL) {
- if (IsConnected()) {
- PostVMDeath();
- }
-
/*
* Close down the network to inspire the thread to halt.
*/
@@ -458,6 +456,7 @@ void JdwpState::Run() {
if (!netState->Establish(options_)) {
/* wake anybody who was waiting for us to succeed */
MutexLock mu(thread_, attach_lock_);
+ debug_thread_id_ = static_cast<ObjectId>(-1);
attach_cond_.Broadcast(thread_);
break;
}
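The new loop around attach_cond_.Wait() is the standard guard against spurious and early wakeups: a condition-variable wait must re-test the predicate it encodes, here debug_thread_id_ becoming nonzero (the attached debugger's id, or -1 when Establish() fails). The same shape in portable C++ (a sketch, not ART's Mutex/ConditionVariable API):

  #include <condition_variable>
  #include <cstdint>
  #include <mutex>

  std::mutex attach_mutex;
  std::condition_variable attach_cond;
  uint64_t debug_thread_id = 0;  // 0 = still waiting; -1 = attach failed.

  void WaitForAttach() {
    std::unique_lock<std::mutex> lock(attach_mutex);
    while (debug_thread_id == 0) {  // Re-test on every wakeup; wait() may return spuriously.
      attach_cond.wait(lock);
    }
  }

  void SignalAttachFailed() {
    std::lock_guard<std::mutex> lock(attach_mutex);
    debug_thread_id = static_cast<uint64_t>(-1);  // Distinguish failure from "not yet".
    attach_cond.notify_all();
  }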
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index b7d485e520..1c870cd007 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -43,7 +43,6 @@
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
#include "mirror/throwable.h"
-#include "native_bridge.h"
#include "parsed_options.h"
#include "reflection.h"
#include "runtime.h"
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index 844d14a063..b236edea9e 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -746,6 +746,21 @@ TEST_F(JniInternalTest, GetMethodID) {
GetMethodIdBadArgumentTest(true);
}
+TEST_F(JniInternalTest, CallVoidMethodNullReceiver) {
+ jclass jlobject = env_->FindClass("java/lang/Object");
+ jmethodID method;
+
+ // Check that GetMethodID for java.lang.Object.<init>() finds the constructor.
+ method = env_->GetMethodID(jlobject, "<init>", "()V");
+ EXPECT_NE(nullptr, method);
+ EXPECT_FALSE(env_->ExceptionCheck());
+
+ // Null object to CallVoidMethod.
+ CheckJniAbortCatcher check_jni_abort_catcher;
+ env_->CallVoidMethod(nullptr, method);
+ check_jni_abort_catcher.Check("null");
+}
+
TEST_F(JniInternalTest, GetStaticMethodID) {
jclass jlobject = env_->FindClass("java/lang/Object");
jclass jlnsme = env_->FindClass("java/lang/NoSuchMethodError");
diff --git a/runtime/leb128.h b/runtime/leb128.h
index 89de16e13b..dfb42b8e05 100644
--- a/runtime/leb128.h
+++ b/runtime/leb128.h
@@ -137,25 +137,26 @@ static inline uint8_t* EncodeSignedLeb128(uint8_t* dest, int32_t value) {
return dest;
}
-// An encoder with an API similar to vector<uint32_t> where the data is captured in ULEB128 format.
-class Leb128EncodingVector {
+// An encoder that pushes uint32_t data onto the given std::vector.
+class Leb128Encoder {
public:
- Leb128EncodingVector() {
+ explicit Leb128Encoder(std::vector<uint8_t>* data) : data_(data) {
+ DCHECK(data != nullptr);
}
void Reserve(uint32_t size) {
- data_.reserve(size);
+ data_->reserve(size);
}
void PushBackUnsigned(uint32_t value) {
uint8_t out = value & 0x7f;
value >>= 7;
while (value != 0) {
- data_.push_back(out | 0x80);
+ data_->push_back(out | 0x80);
out = value & 0x7f;
value >>= 7;
}
- data_.push_back(out);
+ data_->push_back(out);
}
template<typename It>
@@ -169,12 +170,12 @@ class Leb128EncodingVector {
uint32_t extra_bits = static_cast<uint32_t>(value ^ (value >> 31)) >> 6;
uint8_t out = value & 0x7f;
while (extra_bits != 0u) {
- data_.push_back(out | 0x80);
+ data_->push_back(out | 0x80);
value >>= 7;
out = value & 0x7f;
extra_bits >>= 7;
}
- data_.push_back(out);
+ data_->push_back(out);
}
template<typename It>
@@ -185,12 +186,23 @@ class Leb128EncodingVector {
}
const std::vector<uint8_t>& GetData() const {
- return data_;
+ return *data_;
}
+ protected:
+ std::vector<uint8_t>* const data_;
+
private:
- std::vector<uint8_t> data_;
+ DISALLOW_COPY_AND_ASSIGN(Leb128Encoder);
+};
+
+// An encoder with an API similar to vector<uint32_t> where the data is captured in ULEB128 format.
+class Leb128EncodingVector FINAL : private std::vector<uint8_t>, public Leb128Encoder {
+ public:
+ Leb128EncodingVector() : Leb128Encoder(this) {
+ }
+ private:
DISALLOW_COPY_AND_ASSIGN(Leb128EncodingVector);
};
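To see what PushBackUnsigned() emits, take value = 300 (binary 1 0010 1100): the low seven bits 0x2C go out with the continuation bit set (0xAC), then the remaining bits 0x02 go out without it, so 300 encodes as the two bytes AC 02. A small usage sketch of the new external-buffer form (include path assumed from the ART tree):

  #include <cstdint>
  #include <vector>
  // #include "leb128.h"  // Assumed location of Leb128Encoder.

  void EncodeExample() {
    std::vector<uint8_t> buffer;     // The caller now owns the storage...
    Leb128Encoder encoder(&buffer);  // ...and the encoder only borrows it.
    encoder.PushBackUnsigned(300);   // Appends 0xAC, 0x02.
    encoder.PushBackUnsigned(127);   // Appends 0x7F: one byte, no continuation bit.
    // buffer now holds {0xAC, 0x02, 0x7F}.
  }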
diff --git a/runtime/method_helper.h b/runtime/method_helper.h
index f71d273023..8150456eb9 100644
--- a/runtime/method_helper.h
+++ b/runtime/method_helper.h
@@ -41,6 +41,11 @@ class MethodHelper {
return method_->GetInterfaceMethodIfProxy();
}
+ // GetMethod() != Get() for proxy methods.
+ mirror::ArtMethod* Get() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return method_.Get();
+ }
+
mirror::String* GetNameAsString(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const char* GetShorty() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index 370bfb997b..131f5d6ee0 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -281,6 +281,19 @@ uint32_t ArtMethod::FindCatchBlock(Handle<ArtMethod> h_this, Handle<Class> excep
return found_dex_pc;
}
+bool ArtMethod::IsEntrypointInterpreter() {
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ const void* oat_quick_code = class_linker->GetOatMethodQuickCodeFor(this);
+ const void* oat_portable_code = class_linker->GetOatMethodPortableCodeFor(this);
+ if (!IsPortableCompiled()) { // Quick.
+ return oat_quick_code == nullptr ||
+ oat_quick_code != GetEntryPointFromQuickCompiledCode();
+ } else { // Portable.
+ return oat_portable_code == nullptr ||
+ oat_portable_code != GetEntryPointFromPortableCompiledCode();
+ }
+}
+
void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue* result,
const char* shorty) {
if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEnd())) {
@@ -318,6 +331,13 @@ void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue*
have_quick_code ? GetEntryPointFromQuickCompiledCode()
: GetEntryPointFromPortableCompiledCode());
}
+
+ // Ensure that we won't be accidentally calling quick/portable compiled code when -Xint.
+ if (kIsDebugBuild && Runtime::Current()->GetInstrumentation()->IsForcedInterpretOnly()) {
+ CHECK(IsEntrypointInterpreter())
+ << "Don't call compiled code when -Xint " << PrettyMethod(this);
+ }
+
if (!IsPortableCompiled()) {
#ifdef __LP64__
if (!IsStatic()) {
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index fa592c29b5..ebd5bd581b 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -309,6 +309,11 @@ class MANAGED ArtMethod FINAL : public Object {
void AssertPcIsWithinQuickCode(uintptr_t pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Returns true if the entrypoint points to the interpreter, as
+ // opposed to the compiled code; that is, this method will be
+ // interpreted on invocation.
+ bool IsEntrypointInterpreter() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
uint32_t GetQuickOatCodeOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
uint32_t GetPortableOatCodeOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void SetQuickOatCodeOffset(uint32_t code_offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 3543654868..a1177d645d 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -162,7 +162,8 @@ int32_t Object::IdentityHashCode() const {
break;
}
case LockWord::kThinLocked: {
- // Inflate the thin lock to a monitor and stick the hash code inside of the monitor.
+ // Inflate the thin lock to a monitor and stick the hash code inside the monitor. May
+ // fail spuriously.
Thread* self = Thread::Current();
StackHandleScope<1> hs(self);
Handle<mirror::Object> h_this(hs.NewHandle(current_this));
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 433c1b2d6d..5dd16efa02 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -176,10 +176,6 @@ Monitor::~Monitor() {
// Deflated monitors have a null object.
}
-/*
- * Links a thread into a monitor's wait set. The monitor lock must be
- * held by the caller of this routine.
- */
void Monitor::AppendToWaitSet(Thread* thread) {
DCHECK(owner_ == Thread::Current());
DCHECK(thread != NULL);
@@ -197,10 +193,6 @@ void Monitor::AppendToWaitSet(Thread* thread) {
t->SetWaitNext(thread);
}
-/*
- * Unlinks a thread from a monitor's wait set. The monitor lock must
- * be held by the caller of this routine.
- */
void Monitor::RemoveFromWaitSet(Thread *thread) {
DCHECK(owner_ == Thread::Current());
DCHECK(thread != NULL);
@@ -395,29 +387,6 @@ bool Monitor::Unlock(Thread* self) {
return true;
}
-/*
- * Wait on a monitor until timeout, interrupt, or notification. Used for
- * Object.wait() and (somewhat indirectly) Thread.sleep() and Thread.join().
- *
- * If another thread calls Thread.interrupt(), we throw InterruptedException
- * and return immediately if one of the following are true:
- * - blocked in wait(), wait(long), or wait(long, int) methods of Object
- * - blocked in join(), join(long), or join(long, int) methods of Thread
- * - blocked in sleep(long), or sleep(long, int) methods of Thread
- * Otherwise, we set the "interrupted" flag.
- *
- * Checks to make sure that "ns" is in the range 0-999999
- * (i.e. fractions of a millisecond) and throws the appropriate
- * exception if it isn't.
- *
- * The spec allows "spurious wakeups", and recommends that all code using
- * Object.wait() do so in a loop. This appears to derive from concerns
- * about pthread_cond_wait() on multiprocessor systems. Some commentary
- * on the web casts doubt on whether these can/should occur.
- *
- * Since we're allowed to wake up "early", we clamp extremely long durations
- * to return at the end of the 32-bit time epoch.
- */
void Monitor::Wait(Thread* self, int64_t ms, int32_t ns,
bool interruptShouldThrow, ThreadState why) {
DCHECK(self != NULL);
@@ -641,11 +610,6 @@ bool Monitor::Deflate(Thread* self, mirror::Object* obj) {
return true;
}
-/*
- * Changes the shape of a monitor from thin to fat, preserving the internal lock state. The calling
- * thread must own the lock or the owner must be suspended. There's a race with other threads
- * inflating the lock and so the caller should read the monitor following the call.
- */
void Monitor::Inflate(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code) {
DCHECK(self != nullptr);
DCHECK(obj != nullptr);
@@ -823,38 +787,37 @@ bool Monitor::MonitorExit(Thread* self, mirror::Object* obj) {
}
}
-/*
- * Object.wait(). Also called for class init.
- */
void Monitor::Wait(Thread* self, mirror::Object *obj, int64_t ms, int32_t ns,
bool interruptShouldThrow, ThreadState why) {
DCHECK(self != nullptr);
DCHECK(obj != nullptr);
LockWord lock_word = obj->GetLockWord(true);
- switch (lock_word.GetState()) {
- case LockWord::kHashCode:
- // Fall-through.
- case LockWord::kUnlocked:
- ThrowIllegalMonitorStateExceptionF("object not locked by thread before wait()");
- return; // Failure.
- case LockWord::kThinLocked: {
- uint32_t thread_id = self->GetThreadId();
- uint32_t owner_thread_id = lock_word.ThinLockOwner();
- if (owner_thread_id != thread_id) {
+ while (lock_word.GetState() != LockWord::kFatLocked) {
+ switch (lock_word.GetState()) {
+ case LockWord::kHashCode:
+ // Fall-through.
+ case LockWord::kUnlocked:
ThrowIllegalMonitorStateExceptionF("object not locked by thread before wait()");
return; // Failure.
- } else {
- // We own the lock, inflate to enqueue ourself on the Monitor.
- Inflate(self, self, obj, 0);
- lock_word = obj->GetLockWord(true);
+ case LockWord::kThinLocked: {
+ uint32_t thread_id = self->GetThreadId();
+ uint32_t owner_thread_id = lock_word.ThinLockOwner();
+ if (owner_thread_id != thread_id) {
+ ThrowIllegalMonitorStateExceptionF("object not locked by thread before wait()");
+ return; // Failure.
+ } else {
+ // We own the lock; inflate to enqueue ourselves on the Monitor. May fail spuriously,
+ // so re-read the lock word.
+ Inflate(self, self, obj, 0);
+ lock_word = obj->GetLockWord(true);
+ }
+ break;
+ }
+ case LockWord::kFatLocked: // Unreachable given the loop condition above. Fall-through.
+ default: {
+ LOG(FATAL) << "Invalid monitor state " << lock_word.GetState();
+ return;
}
- break;
- }
- case LockWord::kFatLocked:
- break; // Already set for a wait.
- default: {
- LOG(FATAL) << "Invalid monitor state " << lock_word.GetState();
- return;
}
}
Monitor* mon = lock_word.FatLockMonitor();
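The rewritten prologue loops because Inflate() has no error return and may fail spuriously (a competing thread can install a hash code or win the inflation race), so the only way to know it worked is to re-read the lock word and re-test. A self-contained model of that shape (types hypothetical, not ART's LockWord):

  #include <atomic>

  enum class LockState { kThinLocked, kFatLocked };
  std::atomic<LockState> g_state{LockState::kThinLocked};

  LockState ReadLockState() { return g_state.load(); }  // Models obj->GetLockWord(true).GetState().
  void TryInflate() { g_state.store(LockState::kFatLocked); }  // Real Inflate() may not take effect.

  void WaitPrologue() {
    LockState state = ReadLockState();
    while (state != LockState::kFatLocked) {
      TryInflate();             // No error return; success is only observable...
      state = ReadLockState();  // ...by re-reading the state after each attempt.
    }
    // Only now is it safe to fetch the fat-lock Monitor and enqueue the waiter.
  }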
@@ -982,7 +945,7 @@ mirror::Object* Monitor::GetContendedMonitor(Thread* thread) {
}
void Monitor::VisitLocks(StackVisitor* stack_visitor, void (*callback)(mirror::Object*, void*),
- void* callback_context) {
+ void* callback_context, bool abort_on_failure) {
mirror::ArtMethod* m = stack_visitor->GetMethod();
CHECK(m != NULL);
@@ -1015,10 +978,19 @@ void Monitor::VisitLocks(StackVisitor* stack_visitor, void (*callback)(mirror::O
return; // No "tries" implies no synchronization, so no held locks to report.
}
+ // Get the dex pc. If abort_on_failure is false, GetDexPc will not abort when it cannot
+ // find the dex pc and will return kDexNoIndex instead. In that case bail out, as it
+ // indicates we have an inconsistent stack anyway.
+ uint32_t dex_pc = stack_visitor->GetDexPc(abort_on_failure);
+ if (!abort_on_failure && dex_pc == DexFile::kDexNoIndex) {
+ LOG(ERROR) << "Could not find dex_pc for " << PrettyMethod(m);
+ return;
+ }
+
// Ask the verifier for the dex pcs of all the monitor-enter instructions corresponding to
// the locks held in this stack frame.
std::vector<uint32_t> monitor_enter_dex_pcs;
- verifier::MethodVerifier::FindLocksAtDexPc(m, stack_visitor->GetDexPc(), &monitor_enter_dex_pcs);
+ verifier::MethodVerifier::FindLocksAtDexPc(m, dex_pc, &monitor_enter_dex_pcs);
if (monitor_enter_dex_pcs.empty()) {
return;
}
diff --git a/runtime/monitor.h b/runtime/monitor.h
index 26d43c953b..efa83c70c5 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -74,6 +74,8 @@ class Monitor {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DoNotify(self, obj, true);
}
+
+ // Object.wait(). Also called for class init.
static void Wait(Thread* self, mirror::Object* obj, int64_t ms, int32_t ns,
bool interruptShouldThrow, ThreadState why)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -88,8 +90,10 @@ class Monitor {
// Calls 'callback' once for each lock held in the single stack frame represented by
// the current state of 'stack_visitor'.
+ // The abort_on_failure flag allows us to avoid dying when the state of the runtime is
+ // disorderly. This is necessary when we have already aborted but still want to dump as
+ // much of the stack as we can.
static void VisitLocks(StackVisitor* stack_visitor, void (*callback)(mirror::Object*, void*),
- void* callback_context)
+ void* callback_context, bool abort_on_failure = true)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static bool IsValidLockWord(LockWord lock_word);
@@ -117,6 +121,7 @@ class Monitor {
return monitor_id_;
}
+ // Inflate the lock on obj. May fail for spurious reasons, so always re-check the lock word.
static void InflateThinLocked(Thread* self, Handle<mirror::Object> obj, LockWord lock_word,
uint32_t hash_code) NO_THREAD_SAFETY_ANALYSIS;
@@ -135,9 +140,18 @@ class Monitor {
LOCKS_EXCLUDED(monitor_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Links a thread into a monitor's wait set. The monitor lock must be held by the caller of this
+ // routine.
void AppendToWaitSet(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(monitor_lock_);
+
+ // Unlinks a thread from a monitor's wait set. The monitor lock must be held by the caller of
+ // this routine.
void RemoveFromWaitSet(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(monitor_lock_);
+ // Changes the shape of a monitor from thin to fat, preserving the internal lock state. The
+ // calling thread must own the lock or the owner must be suspended. There's a race with other
+ // threads inflating the lock or installing hash codes, plus spurious failures; the caller
+ // should re-read the lock word following the call.
static void Inflate(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -168,6 +182,25 @@ class Monitor {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Wait on a monitor until timeout, interrupt, or notification. Used for Object.wait() and
+ // (somewhat indirectly) Thread.sleep() and Thread.join().
+ //
+ // If another thread calls Thread.interrupt(), we throw InterruptedException and return
+ // immediately if one of the following are true:
+ // - blocked in wait(), wait(long), or wait(long, int) methods of Object
+ // - blocked in join(), join(long), or join(long, int) methods of Thread
+ // - blocked in sleep(long), or sleep(long, int) methods of Thread
+ // Otherwise, we set the "interrupted" flag.
+ //
+ // Checks to make sure that "ns" is in the range 0-999999 (i.e. fractions of a millisecond) and
+ // throws the appropriate exception if it isn't.
+ //
+ // The spec allows "spurious wakeups", and recommends that all code using Object.wait() do so in
+ // a loop. This appears to derive from concerns about pthread_cond_wait() on multiprocessor
+ // systems. Some commentary on the web casts doubt on whether these can/should occur.
+ //
+ // Since we're allowed to wake up "early", we clamp extremely long durations to return at the end
+ // of the 32-bit time epoch.
void Wait(Thread* self, int64_t msec, int32_t nsec, bool interruptShouldThrow, ThreadState why)
LOCKS_EXCLUDED(monitor_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index f199c99599..14d6cd950e 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -27,6 +27,7 @@
#include "base/logging.h"
#include "base/stl_util.h"
+#include "base/stringprintf.h"
#include "class_linker.h"
#include "common_throws.h"
#include "dex_file-inl.h"
@@ -490,6 +491,12 @@ static jbyte IsDexOptNeededInternal(JNIEnv* env, const char* filename,
}
const InstructionSet target_instruction_set = GetInstructionSetFromString(instruction_set);
+ if (target_instruction_set == kNone) {
+ ScopedLocalRef<jclass> iae(env, env->FindClass("java/lang/IllegalArgumentException"));
+ std::string message(StringPrintf("Instruction set %s is invalid.", instruction_set));
+ env->ThrowNew(iae.get(), message.c_str());
+ return 0;
+ }
// Get the filename for odex file next to the dex file.
std::string odex_filename(DexFilenameToOdexFilename(filename, target_instruction_set));
@@ -551,8 +558,16 @@ static jbyte IsDexOptNeededInternal(JNIEnv* env, const char* filename,
static jbyte DexFile_isDexOptNeededInternal(JNIEnv* env, jclass, jstring javaFilename,
jstring javaPkgname, jstring javaInstructionSet, jboolean defer) {
ScopedUtfChars filename(env, javaFilename);
+ if (env->ExceptionCheck()) {
+ return 0;
+ }
+
NullableScopedUtfChars pkgname(env, javaPkgname);
+
ScopedUtfChars instruction_set(env, javaInstructionSet);
+ if (env->ExceptionCheck()) {
+ return 0;
+ }
return IsDexOptNeededInternal(env, filename.c_str(), pkgname.c_str(),
instruction_set.c_str(), defer);
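The added ExceptionCheck() calls matter because ScopedUtfChars throws NullPointerException from its constructor when the jstring is null and then yields a null c_str(); without the check, the c_str() results would be dereferenced with an exception already pending. The general JNI pattern the fix follows (a sketch, not the exact ART entry point):

  #include <cstring>
  #include <jni.h>
  // #include "ScopedUtfChars.h"  // libnativehelper; assumed include path.

  static jint NameLength(JNIEnv* env, jclass, jstring java_name) {
    ScopedUtfChars name(env, java_name);  // Throws NPE if java_name is null.
    if (env->ExceptionCheck()) {
      return 0;  // Propagate the pending exception; name.c_str() is null here.
    }
    return static_cast<jint>(strlen(name.c_str()));  // Safe from this point on.
  }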
diff --git a/runtime/native/dalvik_system_VMStack.cc b/runtime/native/dalvik_system_VMStack.cc
index 5f718ba213..b0792293e0 100644
--- a/runtime/native/dalvik_system_VMStack.cc
+++ b/runtime/native/dalvik_system_VMStack.cc
@@ -76,6 +76,10 @@ static jobject VMStack_getCallingClassLoader(JNIEnv* env, jclass) {
ScopedFastNativeObjectAccess soa(env);
NthCallerVisitor visitor(soa.Self(), 2);
visitor.WalkStack();
+ if (UNLIKELY(visitor.caller == nullptr)) {
+ // The caller is an attached native thread.
+ return nullptr;
+ }
return soa.AddLocalReference<jobject>(visitor.caller->GetDeclaringClass()->GetClassLoader());
}
@@ -113,6 +117,10 @@ static jclass VMStack_getStackClass2(JNIEnv* env, jclass) {
ScopedFastNativeObjectAccess soa(env);
NthCallerVisitor visitor(soa.Self(), 3);
visitor.WalkStack();
+ if (UNLIKELY(visitor.caller == nullptr)) {
+ // The caller is an attached native thread.
+ return nullptr;
+ }
return soa.AddLocalReference<jclass>(visitor.caller->GetDeclaringClass());
}
diff --git a/runtime/native_bridge.cc b/runtime/native_bridge.cc
deleted file mode 100644
index d0b516bf35..0000000000
--- a/runtime/native_bridge.cc
+++ /dev/null
@@ -1,267 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "native_bridge.h"
-
-#include <dlfcn.h>
-#include <stdio.h>
-#include "jni.h"
-
-#include "base/mutex.h"
-#include "mirror/art_method-inl.h"
-#include "mirror/class-inl.h"
-#include "scoped_thread_state_change.h"
-#include "ScopedLocalRef.h"
-#include "thread.h"
-
-#ifdef HAVE_ANDROID_OS
-#include "cutils/properties.h"
-#endif
-
-
-namespace art {
-
-// The symbol name exposed by native-bridge with the type of NativeBridgeCallbacks.
-static constexpr const char* kNativeBridgeInterfaceSymbol = "NativeBridgeItf";
-
-// The library name we are supposed to load.
-static std::string native_bridge_library_string = "";
-
-// Whether a native bridge is available (loaded and ready).
-static bool available = false;
-// Whether we have already initialized (or tried to).
-static bool initialized = false;
-
-struct NativeBridgeCallbacks;
-static NativeBridgeCallbacks* callbacks = nullptr;
-
-// ART interfaces to native-bridge.
-struct NativeBridgeArtCallbacks {
- // Get shorty of a Java method. The shorty is supposed to be persistent in memory.
- //
- // Parameters:
- // env [IN] pointer to JNIenv.
- // mid [IN] Java methodID.
- // Returns:
- // short descriptor for method.
- const char* (*getMethodShorty)(JNIEnv* env, jmethodID mid);
-
- // Get number of native methods for specified class.
- //
- // Parameters:
- // env [IN] pointer to JNIenv.
- // clazz [IN] Java class object.
- // Returns:
- // number of native methods.
- uint32_t (*getNativeMethodCount)(JNIEnv* env, jclass clazz);
-
- // Get at most 'method_count' native methods for specified class 'clazz'. Results are outputed
- // via 'methods' [OUT]. The signature pointer in JNINativeMethod is reused as the method shorty.
- //
- // Parameters:
- // env [IN] pointer to JNIenv.
- // clazz [IN] Java class object.
- // methods [OUT] array of method with the name, shorty, and fnPtr.
- // method_count [IN] max number of elements in methods.
- // Returns:
- // number of method it actually wrote to methods.
- uint32_t (*getNativeMethods)(JNIEnv* env, jclass clazz, JNINativeMethod* methods,
- uint32_t method_count);
-};
-
-// Native-bridge interfaces to ART
-struct NativeBridgeCallbacks {
- // Initialize native-bridge. Native-bridge's internal implementation must ensure MT safety and
- // that the native-bridge is initialized only once. Thus it is OK to call this interface for an
- // already initialized native-bridge.
- //
- // Parameters:
- // art_cbs [IN] the pointer to NativeBridgeArtCallbacks.
- // Returns:
- // true iff initialization was successful.
- bool (*initialize)(NativeBridgeArtCallbacks* art_cbs);
-
- // Load a shared library that is supported by the native-bridge.
- //
- // Parameters:
- // libpath [IN] path to the shared library
- // flag [IN] the stardard RTLD_XXX defined in bionic dlfcn.h
- // Returns:
- // The opaque handle of the shared library if sucessful, otherwise NULL
- void* (*loadLibrary)(const char* libpath, int flag);
-
- // Get a native-bridge trampoline for specified native method. The trampoline has same
- // sigature as the native method.
- //
- // Parameters:
- // handle [IN] the handle returned from loadLibrary
- // shorty [IN] short descriptor of native method
- // len [IN] length of shorty
- // Returns:
- // address of trampoline if successful, otherwise NULL
- void* (*getTrampoline)(void* handle, const char* name, const char* shorty, uint32_t len);
-
- // Check whether native library is valid and is for an ABI that is supported by native-bridge.
- //
- // Parameters:
- // libpath [IN] path to the shared library
- // Returns:
- // TRUE if library is supported by native-bridge, FALSE otherwise
- bool (*isSupported)(const char* libpath);
-};
-
-static const char* GetMethodShorty(JNIEnv* env, jmethodID mid) {
- ScopedObjectAccess soa(env);
- StackHandleScope<1> scope(soa.Self());
- mirror::ArtMethod* m = soa.DecodeMethod(mid);
- MethodHelper mh(scope.NewHandle(m));
- return mh.GetShorty();
-}
-
-static uint32_t GetNativeMethodCount(JNIEnv* env, jclass clazz) {
- if (clazz == nullptr)
- return 0;
-
- ScopedObjectAccess soa(env);
- mirror::Class* c = soa.Decode<mirror::Class*>(clazz);
-
- uint32_t native_method_count = 0;
- for (uint32_t i = 0; i < c->NumDirectMethods(); ++i) {
- mirror::ArtMethod* m = c->GetDirectMethod(i);
- if (m->IsNative()) {
- native_method_count++;
- }
- }
- for (uint32_t i = 0; i < c->NumVirtualMethods(); ++i) {
- mirror::ArtMethod* m = c->GetVirtualMethod(i);
- if (m->IsNative()) {
- native_method_count++;
- }
- }
- return native_method_count;
-}
-
-static uint32_t GetNativeMethods(JNIEnv* env, jclass clazz, JNINativeMethod* methods,
- uint32_t method_count) {
- if ((clazz == nullptr) || (methods == nullptr)) {
- return 0;
- }
- ScopedObjectAccess soa(env);
- mirror::Class* c = soa.Decode<mirror::Class*>(clazz);
-
- uint32_t count = 0;
- for (uint32_t i = 0; i < c->NumDirectMethods(); ++i) {
- mirror::ArtMethod* m = c->GetDirectMethod(i);
- if (m->IsNative()) {
- if (count < method_count) {
- methods[count].name = m->GetName();
- methods[count].signature = m->GetShorty();
- methods[count].fnPtr = const_cast<void*>(m->GetNativeMethod());
- count++;
- } else {
- LOG(WARNING) << "Output native method array too small. Skipping " << PrettyMethod(m);
- }
- }
- }
- for (uint32_t i = 0; i < c->NumVirtualMethods(); ++i) {
- mirror::ArtMethod* m = c->GetVirtualMethod(i);
- if (m->IsNative()) {
- if (count < method_count) {
- methods[count].name = m->GetName();
- methods[count].signature = m->GetShorty();
- methods[count].fnPtr = const_cast<void*>(m->GetNativeMethod());
- count++;
- } else {
- LOG(WARNING) << "Output native method array too small. Skipping " << PrettyMethod(m);
- }
- }
- }
- return count;
-}
-
-static NativeBridgeArtCallbacks NativeBridgeArtItf = {
- GetMethodShorty,
- GetNativeMethodCount,
- GetNativeMethods
-};
-
-void SetNativeBridgeLibraryString(const std::string& nb_library_string) {
- // This is called when the runtime starts and nothing is working concurrently
- // so we don't need a lock here.
-
- native_bridge_library_string = nb_library_string;
-
- if (native_bridge_library_string.empty()) {
- initialized = true;
- available = false;
- }
-}
-
-static bool NativeBridgeInitialize() {
- // TODO: Missing annotalysis static lock ordering of DEFAULT_MUTEX_ACQUIRED, place lock into
- // global order or remove.
- static Mutex lock("native bridge lock");
- MutexLock mu(Thread::Current(), lock);
-
- if (initialized) {
- // Somebody did it before.
- return available;
- }
-
- available = false;
-
- void* handle = dlopen(native_bridge_library_string.c_str(), RTLD_LAZY);
- if (handle != nullptr) {
- callbacks = reinterpret_cast<NativeBridgeCallbacks*>(dlsym(handle,
- kNativeBridgeInterfaceSymbol));
-
- if (callbacks != nullptr) {
- available = callbacks->initialize(&NativeBridgeArtItf);
- }
-
- if (!available) {
- dlclose(handle);
- }
- }
-
- initialized = true;
-
- return available;
-}
-
-void* NativeBridgeLoadLibrary(const char* libpath, int flag) {
- if (NativeBridgeInitialize()) {
- return callbacks->loadLibrary(libpath, flag);
- }
- return nullptr;
-}
-
-void* NativeBridgeGetTrampoline(void* handle, const char* name, const char* shorty,
- uint32_t len) {
- if (NativeBridgeInitialize()) {
- return callbacks->getTrampoline(handle, name, shorty, len);
- }
- return nullptr;
-}
-
-bool NativeBridgeIsSupported(const char* libpath) {
- if (NativeBridgeInitialize()) {
- return callbacks->isSupported(libpath);
- }
- return false;
-}
-
-}; // namespace art
diff --git a/runtime/native_bridge.h b/runtime/native_bridge.h
deleted file mode 100644
index be647fc1eb..0000000000
--- a/runtime/native_bridge.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_NATIVE_BRIDGE_H_
-#define ART_RUNTIME_NATIVE_BRIDGE_H_
-
-#include <string>
-
-namespace art {
-
-// Initialize the native bridge, if any. Should be called by Runtime::Init(). An empty string
-// signals that we do not want to load a native bridge.
-void SetNativeBridgeLibraryString(const std::string& native_bridge_library_string);
-
-// Load a shared library that is supported by the native-bridge.
-void* NativeBridgeLoadLibrary(const char* libpath, int flag);
-
-// Get a native-bridge trampoline for specified native method.
-void* NativeBridgeGetTrampoline(void* handle, const char* name, const char* shorty, uint32_t len);
-
-// True if native library is valid and is for an ABI that is supported by native-bridge.
-bool NativeBridgeIsSupported(const char* libpath);
-
-}; // namespace art
-
-#endif // ART_RUNTIME_NATIVE_BRIDGE_H_
diff --git a/runtime/native_bridge_art_interface.cc b/runtime/native_bridge_art_interface.cc
new file mode 100644
index 0000000000..453c92f495
--- /dev/null
+++ b/runtime/native_bridge_art_interface.cc
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "native_bridge_art_interface.h"
+
+#include "mirror/art_method-inl.h"
+#include "mirror/class-inl.h"
+#include "scoped_thread_state_change.h"
+
+namespace art {
+
+const char* GetMethodShorty(JNIEnv* env, jmethodID mid) {
+ ScopedObjectAccess soa(env);
+ StackHandleScope<1> scope(soa.Self());
+ mirror::ArtMethod* m = soa.DecodeMethod(mid);
+ MethodHelper mh(scope.NewHandle(m));
+ return mh.GetShorty();
+}
+
+uint32_t GetNativeMethodCount(JNIEnv* env, jclass clazz) {
+ if (clazz == nullptr)
+ return 0;
+
+ ScopedObjectAccess soa(env);
+ mirror::Class* c = soa.Decode<mirror::Class*>(clazz);
+
+ uint32_t native_method_count = 0;
+ for (uint32_t i = 0; i < c->NumDirectMethods(); ++i) {
+ mirror::ArtMethod* m = c->GetDirectMethod(i);
+ if (m->IsNative()) {
+ native_method_count++;
+ }
+ }
+ for (uint32_t i = 0; i < c->NumVirtualMethods(); ++i) {
+ mirror::ArtMethod* m = c->GetVirtualMethod(i);
+ if (m->IsNative()) {
+ native_method_count++;
+ }
+ }
+ return native_method_count;
+}
+
+uint32_t GetNativeMethods(JNIEnv* env, jclass clazz, JNINativeMethod* methods,
+ uint32_t method_count) {
+ if ((clazz == nullptr) || (methods == nullptr)) {
+ return 0;
+ }
+ ScopedObjectAccess soa(env);
+ mirror::Class* c = soa.Decode<mirror::Class*>(clazz);
+
+ uint32_t count = 0;
+ for (uint32_t i = 0; i < c->NumDirectMethods(); ++i) {
+ mirror::ArtMethod* m = c->GetDirectMethod(i);
+ if (m->IsNative()) {
+ if (count < method_count) {
+ methods[count].name = m->GetName();
+ methods[count].signature = m->GetShorty();
+ methods[count].fnPtr = const_cast<void*>(m->GetNativeMethod());
+ count++;
+ } else {
+ LOG(WARNING) << "Output native method array too small. Skipping " << PrettyMethod(m);
+ }
+ }
+ }
+ for (uint32_t i = 0; i < c->NumVirtualMethods(); ++i) {
+ mirror::ArtMethod* m = c->GetVirtualMethod(i);
+ if (m->IsNative()) {
+ if (count < method_count) {
+ methods[count].name = m->GetName();
+ methods[count].signature = m->GetShorty();
+ methods[count].fnPtr = const_cast<void*>(m->GetNativeMethod());
+ count++;
+ } else {
+ LOG(WARNING) << "Output native method array too small. Skipping " << PrettyMethod(m);
+ }
+ }
+ }
+ return count;
+}
+
+}; // namespace art
diff --git a/runtime/native_bridge_art_interface.h b/runtime/native_bridge_art_interface.h
new file mode 100644
index 0000000000..08735c8955
--- /dev/null
+++ b/runtime/native_bridge_art_interface.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_BRIDGE_ART_INTERFACE_H_
+#define ART_RUNTIME_NATIVE_BRIDGE_ART_INTERFACE_H_
+
+#include <jni.h>
+#include <stdint.h>
+
+namespace art {
+
+const char* GetMethodShorty(JNIEnv* env, jmethodID mid);
+
+uint32_t GetNativeMethodCount(JNIEnv* env, jclass clazz);
+
+uint32_t GetNativeMethods(JNIEnv* env, jclass clazz, JNINativeMethod* methods,
+ uint32_t method_count);
+
+}; // namespace art
+
+#endif // ART_RUNTIME_NATIVE_BRIDGE_ART_INTERFACE_H_
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 971daf8bbf..50dfe2109f 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -45,7 +45,7 @@ OatFile* OatFile::OpenMemory(std::vector<uint8_t>& oat_contents,
std::string* error_msg) {
CHECK(!oat_contents.empty()) << location;
CheckLocation(location);
- std::unique_ptr<OatFile> oat_file(new OatFile(location));
+ std::unique_ptr<OatFile> oat_file(new OatFile(location, false));
oat_file->begin_ = &oat_contents[0];
oat_file->end_ = &oat_contents[oat_contents.size()];
return oat_file->Setup(error_msg) ? oat_file.release() : nullptr;
@@ -97,7 +97,7 @@ OatFile* OatFile::OpenDlopen(const std::string& elf_filename,
const std::string& location,
byte* requested_base,
std::string* error_msg) {
- std::unique_ptr<OatFile> oat_file(new OatFile(location));
+ std::unique_ptr<OatFile> oat_file(new OatFile(location, true));
bool success = oat_file->Dlopen(elf_filename, requested_base, error_msg);
if (!success) {
return nullptr;
@@ -111,7 +111,7 @@ OatFile* OatFile::OpenElfFile(File* file,
bool writable,
bool executable,
std::string* error_msg) {
- std::unique_ptr<OatFile> oat_file(new OatFile(location));
+ std::unique_ptr<OatFile> oat_file(new OatFile(location, executable));
bool success = oat_file->ElfFileOpen(file, requested_base, writable, executable, error_msg);
if (!success) {
CHECK(!error_msg->empty());
@@ -120,8 +120,9 @@ OatFile* OatFile::OpenElfFile(File* file,
return oat_file.release();
}
-OatFile::OatFile(const std::string& location)
- : location_(location), begin_(NULL), end_(NULL), dlopen_handle_(NULL),
+OatFile::OatFile(const std::string& location, bool is_executable)
+ : location_(location), begin_(NULL), end_(NULL), is_executable_(is_executable),
+ dlopen_handle_(NULL),
secondary_lookup_lock_("OatFile secondary lookup lock", kOatFileSecondaryLookupLock) {
CHECK(!location_.empty());
}
@@ -460,15 +461,17 @@ OatFile::OatClass OatFile::OatDexFile::GetOatClass(uint16_t class_def_index) con
uint32_t bitmap_size = 0;
const byte* bitmap_pointer = nullptr;
const byte* methods_pointer = nullptr;
- if (type == kOatClassSomeCompiled) {
- bitmap_size = static_cast<uint32_t>(*reinterpret_cast<const uint32_t*>(after_type_pointer));
- bitmap_pointer = after_type_pointer + sizeof(bitmap_size);
- CHECK_LE(bitmap_pointer, oat_file_->End()) << oat_file_->GetLocation();
- methods_pointer = bitmap_pointer + bitmap_size;
- } else {
- methods_pointer = after_type_pointer;
+ if (type != kOatClassNoneCompiled) {
+ if (type == kOatClassSomeCompiled) {
+ bitmap_size = static_cast<uint32_t>(*reinterpret_cast<const uint32_t*>(after_type_pointer));
+ bitmap_pointer = after_type_pointer + sizeof(bitmap_size);
+ CHECK_LE(bitmap_pointer, oat_file_->End()) << oat_file_->GetLocation();
+ methods_pointer = bitmap_pointer + bitmap_size;
+ } else {
+ methods_pointer = after_type_pointer;
+ }
+ CHECK_LE(methods_pointer, oat_file_->End()) << oat_file_->GetLocation();
}
- CHECK_LE(methods_pointer, oat_file_->End()) << oat_file_->GetLocation();
return OatClass(oat_file_,
status,
@@ -486,22 +489,23 @@ OatFile::OatClass::OatClass(const OatFile* oat_file,
const OatMethodOffsets* methods_pointer)
: oat_file_(oat_file), status_(status), type_(type),
bitmap_(bitmap_pointer), methods_pointer_(methods_pointer) {
- CHECK(methods_pointer != nullptr);
switch (type_) {
case kOatClassAllCompiled: {
CHECK_EQ(0U, bitmap_size);
CHECK(bitmap_pointer == nullptr);
+ CHECK(methods_pointer != nullptr);
break;
}
case kOatClassSomeCompiled: {
CHECK_NE(0U, bitmap_size);
CHECK(bitmap_pointer != nullptr);
+ CHECK(methods_pointer != nullptr);
break;
}
case kOatClassNoneCompiled: {
CHECK_EQ(0U, bitmap_size);
CHECK(bitmap_pointer == nullptr);
- methods_pointer_ = nullptr;
+ CHECK(methods_pointer_ == nullptr);
break;
}
case kOatClassMax: {
@@ -530,22 +534,19 @@ const OatFile::OatMethod OatFile::OatClass::GetOatMethod(uint32_t method_index)
methods_pointer_index = num_set_bits;
}
const OatMethodOffsets& oat_method_offsets = methods_pointer_[methods_pointer_index];
- return OatMethod(
- oat_file_->Begin(),
- oat_method_offsets.code_offset_,
- oat_method_offsets.gc_map_offset_);
-}
-
-OatFile::OatMethod::OatMethod(const byte* base,
- const uint32_t code_offset,
- const uint32_t gc_map_offset)
- : begin_(base),
- code_offset_(code_offset),
- native_gc_map_offset_(gc_map_offset) {
+ if (oat_file_->IsExecutable()
+ || (Runtime::Current() == nullptr)
+ || Runtime::Current()->IsCompiler()) {
+ return OatMethod(
+ oat_file_->Begin(),
+ oat_method_offsets.code_offset_,
+ oat_method_offsets.gc_map_offset_);
+ } else {
+ // We aren't allowed to use the compiled code, so force the method down the interpreted path.
+ return OatMethod(oat_file_->Begin(), 0, 0);
+ }
}
-OatFile::OatMethod::~OatMethod() {}
-
uint32_t OatFile::OatMethod::GetQuickCodeSize() const {
uintptr_t code = reinterpret_cast<uintptr_t>(GetQuickCode());
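Returning OatMethod(oat_file_->Begin(), 0, 0) works because OatMethod resolves a zero offset to nullptr rather than begin_ + 0, and a null code pointer is what makes the class linker fall back to the interpreter bridge. A sketch of that resolution rule (mirroring OatMethod::GetOatPointer, simplified):

  #include <cstdint>

  const void* GetOatPointerSketch(const uint8_t* begin, uint32_t offset) {
    if (offset == 0) {
      return nullptr;  // "No compiled code": callers install the interpreter bridge.
    }
    return begin + offset;  // Otherwise the pointer is offset-relative to the oat base.
  }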
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 810eccb2d1..8535bf4133 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -64,6 +64,10 @@ class OatFile {
~OatFile();
+ bool IsExecutable() const {
+ return is_executable_;
+ }
+
ElfFile* GetElfFile() const {
CHECK_NE(reinterpret_cast<uintptr_t>(elf_file_.get()), reinterpret_cast<uintptr_t>(nullptr))
<< "Cannot get an elf file from " << GetLocation();
@@ -126,14 +130,19 @@ class OatFile {
const uint8_t* GetMappingTable() const;
const uint8_t* GetVmapTable() const;
- ~OatMethod();
-
// Create an OatMethod with offsets relative to the given base address
- OatMethod(const byte* base,
- const uint32_t code_offset,
- const uint32_t gc_map_offset);
+ OatMethod(const byte* base, const uint32_t code_offset, const uint32_t gc_map_offset)
+ : begin_(base),
+ code_offset_(code_offset),
+ native_gc_map_offset_(gc_map_offset) {
+ }
+ ~OatMethod() {}
- OatMethod() {}
+ // A representation of an invalid OatMethod, used when an OatMethod or OatClass can't be found.
+ // See ClassLinker::FindOatMethodFor.
+ static const OatMethod Invalid() {
+ return OatMethod(nullptr, -1, -1);
+ }
private:
template<class T>
@@ -144,10 +153,10 @@ class OatFile {
return reinterpret_cast<T>(begin_ + offset);
}
- const byte* begin_;
+ const byte* const begin_;
- uint32_t code_offset_;
- uint32_t native_gc_map_offset_;
+ const uint32_t code_offset_;
+ const uint32_t native_gc_map_offset_;
friend class OatClass;
};
@@ -168,7 +177,12 @@ class OatFile {
// methods are not included.
const OatMethod GetOatMethod(uint32_t method_index) const;
- OatClass() {}
+ // A representation of an invalid OatClass, used when an OatClass can't be found.
+ // See ClassLinker::FindOatClass.
+ static OatClass Invalid() {
+ return OatClass(nullptr, mirror::Class::kStatusError, kOatClassNoneCompiled, 0, nullptr,
+ nullptr);
+ }
private:
OatClass(const OatFile* oat_file,
@@ -178,15 +192,15 @@ class OatFile {
const uint32_t* bitmap_pointer,
const OatMethodOffsets* methods_pointer);
- const OatFile* oat_file_;
+ const OatFile* const oat_file_;
- mirror::Class::Status status_;
+ const mirror::Class::Status status_;
- OatClassType type_;
+ const OatClassType type_;
- const uint32_t* bitmap_;
+ const uint32_t* const bitmap_;
- const OatMethodOffsets* methods_pointer_;
+ const OatMethodOffsets* const methods_pointer_;
friend class OatDexFile;
};
@@ -260,7 +274,7 @@ class OatFile {
bool executable,
std::string* error_msg);
- explicit OatFile(const std::string& filename);
+ explicit OatFile(const std::string& filename, bool executable);
bool Dlopen(const std::string& elf_filename, byte* requested_base, std::string* error_msg);
bool ElfFileOpen(File* file, byte* requested_base, bool writable, bool executable,
std::string* error_msg);
@@ -277,6 +291,9 @@ class OatFile {
// Pointer to end of oat region for bounds checking.
const byte* end_;
+ // Was this oat_file loaded executable?
+ const bool is_executable_;
+
// Backing memory map for the oat file when opened by ElfWriter during initial compilation.
std::unique_ptr<MemMap> mem_map_;
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 27f776544a..26360d77e2 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -414,8 +414,12 @@ bool ParsedOptions::Parse(const RuntimeOptions& options, bool ignore_unrecognize
compiler_callbacks_ =
reinterpret_cast<CompilerCallbacks*>(const_cast<void*>(options[i].second));
} else if (option == "imageinstructionset") {
- image_isa_ = GetInstructionSetFromString(
- reinterpret_cast<const char*>(options[i].second));
+ const char* isa_str = reinterpret_cast<const char*>(options[i].second);
+ image_isa_ = GetInstructionSetFromString(isa_str);
+ if (image_isa_ == kNone) {
+ Usage("%s is not a valid instruction set.", isa_str);
+ return false;
+ }
} else if (option == "-Xzygote") {
is_zygote_ = true;
} else if (StartsWith(option, "-Xpatchoat:")) {
@@ -609,7 +613,7 @@ bool ParsedOptions::Parse(const RuntimeOptions& options, bool ignore_unrecognize
return false;
}
} else if (StartsWith(option, "-XX:NativeBridge=")) {
- if (!ParseStringAfterChar(option, '=', &native_bridge_library_string_)) {
+ if (!ParseStringAfterChar(option, '=', &native_bridge_library_path_)) {
return false;
}
} else if (StartsWith(option, "-ea") ||
diff --git a/runtime/parsed_options.h b/runtime/parsed_options.h
index aa2c55713e..1afd610a53 100644
--- a/runtime/parsed_options.h
+++ b/runtime/parsed_options.h
@@ -46,7 +46,7 @@ class ParsedOptions {
bool check_jni_;
bool force_copy_;
std::string jni_trace_;
- std::string native_bridge_library_string_;
+ std::string native_bridge_library_path_;
CompilerCallbacks* compiler_callbacks_;
bool is_zygote_;
bool must_relocate_;
diff --git a/runtime/proxy_test.cc b/runtime/proxy_test.cc
index 5af26b08b4..d977ce9d17 100644
--- a/runtime/proxy_test.cc
+++ b/runtime/proxy_test.cc
@@ -17,14 +17,14 @@
#include <jni.h>
#include <vector>
-#include "common_runtime_test.h"
+#include "common_compiler_test.h"
#include "field_helper.h"
#include "mirror/art_field-inl.h"
#include "scoped_thread_state_change.h"
namespace art {
-class ProxyTest : public CommonRuntimeTest {
+class ProxyTest : public CommonCompilerTest {
public:
// Generate a proxy class with the given name and interfaces. This is a simplification from what
// libcore does to fit to our test needs. We do not check for duplicated interfaces or methods and
@@ -103,12 +103,6 @@ class ProxyTest : public CommonRuntimeTest {
soa.Self()->AssertNoPendingException();
return proxyClass;
}
-
- protected:
- void SetUpRuntimeOptions(RuntimeOptions *options) OVERRIDE {
- options->push_back(std::make_pair(StringPrintf("-Ximage:%s", GetLibCoreOatFileName().c_str()),
- nullptr));
- }
};
// Creates a proxy class and check ClassHelper works correctly.
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 41d69894d5..98eeda7263 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -29,14 +29,14 @@
namespace art {
static constexpr bool kDebugExceptionDelivery = false;
-static constexpr size_t kInvalidFrameId = 0xffffffff;
+static constexpr size_t kInvalidFrameDepth = 0xffffffff;
QuickExceptionHandler::QuickExceptionHandler(Thread* self, bool is_deoptimization)
: self_(self), context_(self->GetLongJumpContext()), is_deoptimization_(is_deoptimization),
method_tracing_active_(is_deoptimization ||
Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled()),
handler_quick_frame_(nullptr), handler_quick_frame_pc_(0), handler_method_(nullptr),
- handler_dex_pc_(0), clear_exception_(false), handler_frame_id_(kInvalidFrameId) {
+ handler_dex_pc_(0), clear_exception_(false), handler_frame_depth_(kInvalidFrameDepth) {
}
// Finds catch handler or prepares for deoptimization.
@@ -51,7 +51,7 @@ class CatchBlockStackVisitor FINAL : public StackVisitor {
bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtMethod* method = GetMethod();
- exception_handler_->SetHandlerFrameId(GetFrameId());
+ exception_handler_->SetHandlerFrameDepth(GetFrameDepth());
if (method == nullptr) {
// This is the upcall, we remember the frame and last pc so that we may long jump to them.
exception_handler_->SetHandlerQuickFramePc(GetCurrentQuickFramePc());
@@ -177,7 +177,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
}
bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- exception_handler_->SetHandlerFrameId(GetFrameId());
+ exception_handler_->SetHandlerFrameDepth(GetFrameDepth());
mirror::ArtMethod* method = GetMethod();
if (method == nullptr) {
// This is the upcall, we remember the frame and last pc so that we may long jump to them.
@@ -295,17 +295,17 @@ void QuickExceptionHandler::DeoptimizeStack() {
// Unwinds all instrumentation stack frame prior to catch handler or upcall.
class InstrumentationStackVisitor : public StackVisitor {
public:
- InstrumentationStackVisitor(Thread* self, bool is_deoptimization, size_t frame_id)
+ InstrumentationStackVisitor(Thread* self, bool is_deoptimization, size_t frame_depth)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
: StackVisitor(self, nullptr),
- self_(self), frame_id_(frame_id),
+ self_(self), frame_depth_(frame_depth),
instrumentation_frames_to_pop_(0) {
- CHECK_NE(frame_id_, kInvalidFrameId);
+ CHECK_NE(frame_depth_, kInvalidFrameDepth);
}
bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- size_t current_frame_id = GetFrameId();
- if (current_frame_id > frame_id_) {
+ size_t current_frame_depth = GetFrameDepth();
+ if (current_frame_depth < frame_depth_) {
CHECK(GetMethod() != nullptr);
if (UNLIKELY(GetQuickInstrumentationExitPc() == GetReturnPc())) {
++instrumentation_frames_to_pop_;
@@ -323,7 +323,7 @@ class InstrumentationStackVisitor : public StackVisitor {
private:
Thread* const self_;
- const size_t frame_id_;
+ const size_t frame_depth_;
size_t instrumentation_frames_to_pop_;
DISALLOW_COPY_AND_ASSIGN(InstrumentationStackVisitor);
@@ -331,7 +331,7 @@ class InstrumentationStackVisitor : public StackVisitor {
void QuickExceptionHandler::UpdateInstrumentationStack() {
if (method_tracing_active_) {
- InstrumentationStackVisitor visitor(self_, is_deoptimization_, handler_frame_id_);
+ InstrumentationStackVisitor visitor(self_, is_deoptimization_, handler_frame_depth_);
visitor.WalkStack(true);
size_t instrumentation_frames_to_pop = visitor.GetInstrumentationFramesToPop();
diff --git a/runtime/quick_exception_handler.h b/runtime/quick_exception_handler.h
index 1d600ed697..b93769cb97 100644
--- a/runtime/quick_exception_handler.h
+++ b/runtime/quick_exception_handler.h
@@ -77,8 +77,8 @@ class QuickExceptionHandler {
clear_exception_ = clear_exception;
}
- void SetHandlerFrameId(size_t frame_id) {
- handler_frame_id_ = frame_id;
+ void SetHandlerFrameDepth(size_t frame_depth) {
+ handler_frame_depth_ = frame_depth;
}
private:
@@ -97,8 +97,8 @@ class QuickExceptionHandler {
uint32_t handler_dex_pc_;
// Should the exception be cleared as the catch block has no move-exception?
bool clear_exception_;
- // Frame id of the catch handler or the upcall.
- size_t handler_frame_id_;
+ // Frame depth of the catch handler or the upcall.
+ size_t handler_frame_depth_;
DISALLOW_COPY_AND_ASSIGN(QuickExceptionHandler);
};
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index d3957c1e9f..ba53c43620 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -63,7 +63,7 @@
#include "mirror/stack_trace_element.h"
#include "mirror/throwable.h"
#include "monitor.h"
-#include "native_bridge.h"
+#include "native_bridge_art_interface.h"
#include "parsed_options.h"
#include "oat_file.h"
#include "quick/quick_method_frame_info.h"
@@ -143,7 +143,8 @@ Runtime::Runtime()
target_sdk_version_(0),
implicit_null_checks_(false),
implicit_so_checks_(false),
- implicit_suspend_checks_(false) {
+ implicit_suspend_checks_(false),
+ native_bridge_art_callbacks_({GetMethodShorty, GetNativeMethodCount, GetNativeMethods}) {
}
Runtime::~Runtime() {
@@ -708,8 +709,11 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized)
self->ClearException();
// Look for a native bridge.
- SetNativeBridgeLibraryString(options->native_bridge_library_string_);
-
+ native_bridge_library_path_ = options->native_bridge_library_path_;
+ if (!native_bridge_library_path_.empty()) {
+ android::SetupNativeBridge(native_bridge_library_path_.c_str(), &native_bridge_art_callbacks_);
+ VLOG(startup) << "Runtime::Setup native bridge library: " << native_bridge_library_path_;
+ }
VLOG(startup) << "Runtime::Init exiting";
return true;
}
@@ -896,11 +900,7 @@ void Runtime::BlockSignals() {
bool Runtime::AttachCurrentThread(const char* thread_name, bool as_daemon, jobject thread_group,
bool create_peer) {
- bool success = Thread::Attach(thread_name, as_daemon, thread_group, create_peer) != NULL;
- if (thread_name == NULL) {
- LOG(WARNING) << *Thread::Current() << " attached without supplying a name";
- }
- return success;
+ return Thread::Attach(thread_name, as_daemon, thread_group, create_peer) != NULL;
}
void Runtime::DetachCurrentThread() {
diff --git a/runtime/runtime.h b/runtime/runtime.h
index e76280ae95..34ccdcb3da 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -31,6 +31,7 @@
#include "instrumentation.h"
#include "instruction_set.h"
#include "jobject_comparator.h"
+#include "nativebridge/native_bridge.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "profiler_options.h"
@@ -616,6 +617,25 @@ class Runtime {
bool implicit_so_checks_; // StackOverflow checks are implicit.
bool implicit_suspend_checks_; // Thread suspension checks are implicit.
+ // The path to the native bridge library. If this is not empty, the native bridge will be
+ // initialized and loaded from the given path.
+ //
+ // The native bridge allows running native code compiled for a foreign ISA. It works as
+ // follows: if a standard dlopen fails to load the native library associated with a native
+ // activity, the runtime asks the native bridge to load it and then obtains the trampoline
+ // for the entry into the native activity.
+ std::string native_bridge_library_path_;
+
+ // Native bridge library runtime callbacks. They represent the runtime interface to the
+ // native bridge.
+ //
+ // The interface is expected to expose the following methods:
+ // getMethodShorty(): when a native method calls the JNI function CallXXXXMethodY(), the
+ // native bridge calls back into the VM for the method's shorty so that it can marshal
+ // arguments for the host calling convention.
+ // getNativeMethodCount() and getNativeMethods(): when the JNI function UnregisterNatives()
+ // is used, the native bridge can call back to get all native methods of the specified class
+ // so that all corresponding trampolines can be destroyed.
+ android::NativeBridgeRuntimeCallbacks native_bridge_art_callbacks_;
+
DISALLOW_COPY_AND_ASSIGN(Runtime);
};
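With the callbacks owned by Runtime, wiring the bridge up amounts to filling an android::NativeBridgeRuntimeCallbacks with the three ART functions and handing it to libnativebridge at startup, as Runtime::Init now does. Roughly (a sketch; it assumes the libnativebridge SetupNativeBridge() signature this change compiles against):

  #include <string>
  #include "native_bridge_art_interface.h"
  #include "nativebridge/native_bridge.h"

  android::NativeBridgeRuntimeCallbacks callbacks = {
    art::GetMethodShorty,       // Shorty lookup for argument marshalling.
    art::GetNativeMethodCount,  // Both used when natives are unregistered...
    art::GetNativeMethods,      // ...so the bridge can destroy its trampolines.
  };

  void MaybeSetupBridge(const std::string& path) {
    if (!path.empty()) {
      android::SetupNativeBridge(path.c_str(), &callbacks);
    }
  }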
diff --git a/runtime/stack.h b/runtime/stack.h
index 578f569c43..e58caeee79 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -553,6 +553,10 @@ class StackVisitor {
return num_frames_;
}
+ size_t GetFrameDepth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return cur_depth_;
+ }
+
// Get the method and dex pc immediately after the one that's currently being visited.
bool GetNextMethodAndDexPc(mirror::ArtMethod** next_method, uint32_t* next_dex_pc)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 7aabfceb2b..7ac685bd84 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -224,7 +224,8 @@ static size_t FixStackSize(size_t stack_size) {
} else {
// If we are going to use implicit stack checks, allocate space for the protected
// region at the bottom of the stack.
- stack_size += Thread::kStackOverflowImplicitCheckSize;
+ stack_size += Thread::kStackOverflowImplicitCheckSize +
+ GetStackOverflowReservedBytes(kRuntimeISA);
}
// Some systems require the stack size to be a multiple of the system page size, so round up.
@@ -264,7 +265,7 @@ void Thread::InstallImplicitProtection() {
// a segv.
// Read every page from the high address to the low.
- for (byte* p = stack_top; p > pregion; p -= kPageSize) {
+ for (byte* p = stack_top; p >= pregion; p -= kPageSize) {
dont_optimize_this = *p;
}
@@ -403,6 +404,8 @@ Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_g
if (thread_name != nullptr) {
self->tlsPtr_.name->assign(thread_name);
::art::SetThreadName(thread_name);
+ } else if (self->GetJniEnv()->check_jni) {
+ LOG(WARNING) << *Thread::Current() << " attached without supplying a name";
}
}
@@ -505,9 +508,7 @@ void Thread::InitStackHwm() {
}
// TODO: move this into the Linux GetThreadStack implementation.
-#if defined(__APPLE__)
- bool is_main_thread = false;
-#else
+#if !defined(__APPLE__)
// If we're the main thread, check whether we were run with an unlimited stack. In that case,
// glibc will have reported a 2GB stack for our 32-bit process, and our stack overflow detection
// will be broken because we'll die long before we get close to 2GB.
@@ -539,7 +540,7 @@ void Thread::InitStackHwm() {
// Set stack_end_ to the bottom of the stack, reserving space for stack overflow handling.
bool implicit_stack_check = !Runtime::Current()->ExplicitStackOverflowChecks();
- ResetDefaultStackEnd(implicit_stack_check);
+ ResetDefaultStackEnd();
// Install the protected region if we are doing implicit overflow checks.
if (implicit_stack_check) {
@@ -551,8 +552,19 @@ void Thread::InitStackHwm() {
// The thread might have a protected region at the bottom. We need
// to install our own region, so we move the limits of the stack
// to make room for it.
- tlsPtr_.stack_begin += guardsize;
- tlsPtr_.stack_end += guardsize;
+
+#if defined(__i386__) || defined(__x86_64__)
+ // Work around an issue reading the last page of the stack on Intel
+ // (b/17111575): we are unable to read the page just above the guard
+ // page on the main stack on an Intel target. When the bug is fixed
+ // this can be removed.
+ if (::art::GetTid() == getpid()) {
+ guardsize += 4 * KB;
+ }
+#endif
+ tlsPtr_.stack_begin += guardsize + kStackOverflowProtectedSize;
+ tlsPtr_.stack_end += guardsize + kStackOverflowProtectedSize;
tlsPtr_.stack_size -= guardsize;
InstallImplicitProtection();
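Taken together with the ResetDefaultStackEnd() change in runtime/thread.h below, the main-thread layout under implicit checks works out roughly as follows (a sketch inferred from this hunk; addresses increase upward):

  // ... ordinary frames, growing down ...
  // <- tlsPtr_.stack_end = stack_begin + GetStackOverflowReservedBytes(kRuntimeISA)
  // space reserved for constructing and throwing StackOverflowError
  // <- tlsPtr_.stack_begin (after += guardsize + kStackOverflowProtectedSize)
  // ART protected region, kStackOverflowProtectedSize (see InstallImplicitProtection())
  // pthread guard region, guardsize (plus a 4 KB pad on Intel main threads, b/17111575)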
@@ -908,7 +920,8 @@ struct StackDumpVisitor : public StackVisitor {
Monitor::DescribeWait(os, thread);
}
if (can_allocate) {
- Monitor::VisitLocks(this, DumpLockedObject, &os);
+ // Visit locks, but do not abort on errors: aborting here could itself trigger a nested abort.
+ Monitor::VisitLocks(this, DumpLockedObject, &os, false);
}
}
diff --git a/runtime/thread.h b/runtime/thread.h
index 120ff6fbb4..fe950c4691 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -594,16 +594,10 @@ class Thread {
void SetStackEndForStackOverflow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Set the stack end to the value to be used during regular execution
- void ResetDefaultStackEnd(bool implicit_overflow_check) {
+ void ResetDefaultStackEnd() {
// Our stacks grow down, so we want stack_end_ to be near there, but reserving enough room
// to throw a StackOverflowError.
- if (implicit_overflow_check) {
- // For implicit checks we also need to add in the protected region above the
- // overflow region.
- tlsPtr_.stack_end = tlsPtr_.stack_begin + kStackOverflowImplicitCheckSize;
- } else {
- tlsPtr_.stack_end = tlsPtr_.stack_begin + GetStackOverflowReservedBytes(kRuntimeISA);
- }
+ tlsPtr_.stack_end = tlsPtr_.stack_begin + GetStackOverflowReservedBytes(kRuntimeISA);
}
// Install the protected region for implicit stack checks.
diff --git a/runtime/utils.cc b/runtime/utils.cc
index b845f50d1f..d7e215b4c2 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -66,8 +66,9 @@ pid_t GetTid() {
uint64_t owner;
CHECK_PTHREAD_CALL(pthread_threadid_np, (NULL, &owner), __FUNCTION__); // Requires Mac OS 10.6
return owner;
+#elif defined(__BIONIC__)
+ return gettid();
#else
- // Neither bionic nor glibc exposes gettid(2).
return syscall(__NR_gettid);
#endif
}
@@ -999,7 +1000,7 @@ void SetThreadName(const char* thread_name) {
} else {
s = thread_name + len - 15;
}
-#if defined(HAVE_ANDROID_PTHREAD_SETNAME_NP)
+#if defined(__BIONIC__)
// pthread_setname_np fails rather than truncating long strings.
char buf[16]; // MAX_TASK_COMM_LEN=16 is hard-coded into bionic
strncpy(buf, s, sizeof(buf)-1);
@@ -1372,21 +1373,11 @@ bool Exec(std::vector<std::string>& arg_vector, std::string* error_msg) {
}
void EncodeUnsignedLeb128(uint32_t data, std::vector<uint8_t>* dst) {
- size_t encoded_size = UnsignedLeb128Size(data);
- size_t cur_index = dst->size();
- dst->resize(dst->size() + encoded_size);
- uint8_t* write_pos = &((*dst)[cur_index]);
- uint8_t* write_pos_after = EncodeUnsignedLeb128(write_pos, data);
- DCHECK_EQ(static_cast<size_t>(write_pos_after - write_pos), encoded_size);
+ Leb128Encoder(dst).PushBackUnsigned(data);
}
void EncodeSignedLeb128(int32_t data, std::vector<uint8_t>* dst) {
- size_t encoded_size = SignedLeb128Size(data);
- size_t cur_index = dst->size();
- dst->resize(dst->size() + encoded_size);
- uint8_t* write_pos = &((*dst)[cur_index]);
- uint8_t* write_pos_after = EncodeSignedLeb128(write_pos, data);
- DCHECK_EQ(static_cast<size_t>(write_pos_after - write_pos), encoded_size);
+ Leb128Encoder(dst).PushBackSigned(data);
}
void PushWord(std::vector<uint8_t>* buf, int data) {
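For reference, unsigned LEB128 stores seven payload bits per byte, least-significant group first, with the high bit set on every byte except the last; 300, for example, encodes as 0xac 0x02. A minimal sketch of the encoding PushBackUnsigned is expected to perform (illustrative; not ART's Leb128Encoder itself):

  #include <cstdint>
  #include <vector>

  static void PushBackUnsignedLeb128(std::vector<uint8_t>* dst, uint32_t data) {
    // Emit 7 bits at a time, low bits first; the high bit marks "more bytes follow".
    while (data >= 0x80) {
      dst->push_back(static_cast<uint8_t>((data & 0x7f) | 0x80));
      data >>= 7;
    }
    dst->push_back(static_cast<uint8_t>(data & 0x7f));
  }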
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index fb57fc7389..69627f51df 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -499,9 +499,9 @@ std::ostream& MethodVerifier::Fail(VerifyError error) {
}
}
failures_.push_back(error);
- std::string location(StringPrintf("%s: [0x%X]", PrettyMethod(dex_method_idx_, *dex_file_).c_str(),
+ std::string location(StringPrintf("%s: [0x%X] ", PrettyMethod(dex_method_idx_, *dex_file_).c_str(),
work_insn_idx_));
- std::ostringstream* failure_message = new std::ostringstream(location);
+ std::ostringstream* failure_message = new std::ostringstream(location, std::ostringstream::ate);
failure_messages_.push_back(failure_message);
return *failure_message;
}
@@ -516,7 +516,7 @@ void MethodVerifier::PrependToLastFailMessage(std::string prepend) {
DCHECK_NE(failure_num, 0U);
std::ostringstream* last_fail_message = failure_messages_[failure_num - 1];
prepend += last_fail_message->str();
- failure_messages_[failure_num - 1] = new std::ostringstream(prepend);
+ failure_messages_[failure_num - 1] = new std::ostringstream(prepend, std::ostringstream::ate);
delete last_fail_message;
}
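Both fixes hinge on the std::ostringstream::ate ("at end") open mode: a stream seeded with a string but opened without ate positions its write pointer at offset zero, so subsequent << output overwrites the seed instead of appending to it. A self-contained illustration:

  #include <iostream>
  #include <sstream>

  int main() {
    std::ostringstream clobbered("location: ");
    clobbered << "oops";
    std::cout << clobbered.str() << "\n";  // "oopstion: " - seed text overwritten.

    std::ostringstream appended("location: ", std::ostringstream::ate);
    appended << "ok";
    std::cout << appended.str() << "\n";   // "location: ok"
    return 0;
  }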
diff --git a/runtime/zip_archive.cc b/runtime/zip_archive.cc
index c02f3106be..63bfc44797 100644
--- a/runtime/zip_archive.cc
+++ b/runtime/zip_archive.cc
@@ -123,7 +123,7 @@ ZipEntry* ZipArchive::Find(const char* name, std::string* error_msg) const {
// Resist the urge to delete the space. <: is a digraph sequence.
std::unique_ptr< ::ZipEntry> zip_entry(new ::ZipEntry);
- const int32_t error = FindEntry(handle_, name, zip_entry.get());
+ const int32_t error = FindEntry(handle_, ZipEntryName(name), zip_entry.get());
if (error) {
*error_msg = std::string(ErrorCodeString(error));
return nullptr;
diff --git a/test/004-JniTest/jni_test.cc b/test/004-JniTest/jni_test.cc
index 554712aa55..f5a1d65ac1 100644
--- a/test/004-JniTest/jni_test.cc
+++ b/test/004-JniTest/jni_test.cc
@@ -28,162 +28,133 @@
static JavaVM* jvm = NULL;
extern "C" JNIEXPORT jint JNI_OnLoad(JavaVM *vm, void *) {
- assert(vm != NULL);
- assert(jvm == NULL);
+ assert(vm != nullptr);
+ assert(jvm == nullptr);
jvm = vm;
return JNI_VERSION_1_6;
}
-static void* testFindClassOnAttachedNativeThread(void*) {
- assert(jvm != NULL);
+static void* AttachHelper(void* arg) {
+ assert(jvm != nullptr);
- JNIEnv* env = NULL;
+ JNIEnv* env = nullptr;
JavaVMAttachArgs args = { JNI_VERSION_1_6, __FUNCTION__, NULL };
int attach_result = jvm->AttachCurrentThread(&env, &args);
assert(attach_result == 0);
- jclass clazz = env->FindClass("Main");
- assert(clazz != NULL);
- assert(!env->ExceptionCheck());
-
- jobjectArray array = env->NewObjectArray(0, clazz, NULL);
- assert(array != NULL);
- assert(!env->ExceptionCheck());
+ typedef void (*Fn)(JNIEnv*);
+ Fn fn = reinterpret_cast<Fn>(arg);
+ fn(env);
int detach_result = jvm->DetachCurrentThread();
assert(detach_result == 0);
- return NULL;
+ return nullptr;
}
-// http://b/10994325
-extern "C" JNIEXPORT void JNICALL Java_Main_testFindClassOnAttachedNativeThread(JNIEnv*,
- jclass) {
+static void PthreadHelper(void (*fn)(JNIEnv*)) {
pthread_t pthread;
- int pthread_create_result = pthread_create(&pthread,
- NULL,
- testFindClassOnAttachedNativeThread,
- NULL);
+ int pthread_create_result = pthread_create(&pthread, nullptr, AttachHelper,
+ reinterpret_cast<void*>(fn));
assert(pthread_create_result == 0);
- int pthread_join_result = pthread_join(pthread, NULL);
+ int pthread_join_result = pthread_join(pthread, nullptr);
assert(pthread_join_result == 0);
}
-static void* testFindFieldOnAttachedNativeThread(void*) {
- assert(jvm != NULL);
+static void testFindClassOnAttachedNativeThread(JNIEnv* env) {
+ jclass clazz = env->FindClass("Main");
+ assert(clazz != nullptr);
+ assert(!env->ExceptionCheck());
- JNIEnv* env = NULL;
- JavaVMAttachArgs args = { JNI_VERSION_1_6, __FUNCTION__, NULL };
- int attach_result = jvm->AttachCurrentThread(&env, &args);
- assert(attach_result == 0);
+ jobjectArray array = env->NewObjectArray(0, clazz, nullptr);
+ assert(array != nullptr);
+ assert(!env->ExceptionCheck());
+}
+
+// http://b/10994325
+extern "C" JNIEXPORT void JNICALL Java_Main_testFindClassOnAttachedNativeThread(JNIEnv*, jclass) {
+ PthreadHelper(&testFindClassOnAttachedNativeThread);
+}
+static void testFindFieldOnAttachedNativeThread(JNIEnv* env) {
jclass clazz = env->FindClass("Main");
- assert(clazz != NULL);
+ assert(clazz != nullptr);
assert(!env->ExceptionCheck());
jfieldID field = env->GetStaticFieldID(clazz, "testFindFieldOnAttachedNativeThreadField", "Z");
- assert(field != NULL);
+ assert(field != nullptr);
assert(!env->ExceptionCheck());
env->SetStaticBooleanField(clazz, field, JNI_TRUE);
-
- int detach_result = jvm->DetachCurrentThread();
- assert(detach_result == 0);
- return NULL;
}
extern "C" JNIEXPORT void JNICALL Java_Main_testFindFieldOnAttachedNativeThreadNative(JNIEnv*,
- jclass) {
- pthread_t pthread;
- int pthread_create_result = pthread_create(&pthread,
- NULL,
- testFindFieldOnAttachedNativeThread,
- NULL);
- assert(pthread_create_result == 0);
- int pthread_join_result = pthread_join(pthread, NULL);
- assert(pthread_join_result == 0);
+ jclass) {
+ PthreadHelper(&testFindFieldOnAttachedNativeThread);
}
-static void* testReflectFieldGetFromAttachedNativeThread(void*) {
- assert(jvm != NULL);
-
- JNIEnv* env = NULL;
- JavaVMAttachArgs args = { JNI_VERSION_1_6, __FUNCTION__, NULL };
- int attach_result = jvm->AttachCurrentThread(&env, &args);
- assert(attach_result == 0);
-
+static void testReflectFieldGetFromAttachedNativeThread(JNIEnv* env) {
jclass clazz = env->FindClass("Main");
- assert(clazz != NULL);
+ assert(clazz != nullptr);
assert(!env->ExceptionCheck());
jclass class_clazz = env->FindClass("java/lang/Class");
- assert(class_clazz != NULL);
+ assert(class_clazz != nullptr);
assert(!env->ExceptionCheck());
jmethodID getFieldMetodId = env->GetMethodID(class_clazz, "getField",
"(Ljava/lang/String;)Ljava/lang/reflect/Field;");
- assert(getFieldMetodId != NULL);
+ assert(getFieldMetodId != nullptr);
assert(!env->ExceptionCheck());
jstring field_name = env->NewStringUTF("testReflectFieldGetFromAttachedNativeThreadField");
- assert(field_name != NULL);
+ assert(field_name != nullptr);
assert(!env->ExceptionCheck());
jobject field = env->CallObjectMethod(clazz, getFieldMetodId, field_name);
- assert(field != NULL);
+ assert(field != nullptr);
assert(!env->ExceptionCheck());
jclass field_clazz = env->FindClass("java/lang/reflect/Field");
- assert(field_clazz != NULL);
+ assert(field_clazz != nullptr);
assert(!env->ExceptionCheck());
jmethodID getBooleanMetodId = env->GetMethodID(field_clazz, "getBoolean",
"(Ljava/lang/Object;)Z");
- assert(getBooleanMetodId != NULL);
+ assert(getBooleanMetodId != nullptr);
assert(!env->ExceptionCheck());
jboolean value = env->CallBooleanMethod(field, getBooleanMetodId, /* ignored */ clazz);
assert(value == false);
assert(!env->ExceptionCheck());
-
- int detach_result = jvm->DetachCurrentThread();
- assert(detach_result == 0);
- return NULL;
}
// http://b/15539150
extern "C" JNIEXPORT void JNICALL Java_Main_testReflectFieldGetFromAttachedNativeThreadNative(
JNIEnv*, jclass) {
- pthread_t pthread;
- int pthread_create_result = pthread_create(&pthread,
- NULL,
- testReflectFieldGetFromAttachedNativeThread,
- NULL);
- assert(pthread_create_result == 0);
- int pthread_join_result = pthread_join(pthread, NULL);
- assert(pthread_join_result == 0);
+ PthreadHelper(&testReflectFieldGetFromAttachedNativeThread);
}
// http://b/11243757
extern "C" JNIEXPORT void JNICALL Java_Main_testCallStaticVoidMethodOnSubClassNative(JNIEnv* env,
- jclass) {
+ jclass) {
jclass super_class = env->FindClass("Main$testCallStaticVoidMethodOnSubClass_SuperClass");
- assert(super_class != NULL);
+ assert(super_class != nullptr);
jmethodID execute = env->GetStaticMethodID(super_class, "execute", "()V");
- assert(execute != NULL);
+ assert(execute != nullptr);
jclass sub_class = env->FindClass("Main$testCallStaticVoidMethodOnSubClass_SubClass");
- assert(sub_class != NULL);
+ assert(sub_class != nullptr);
env->CallStaticVoidMethod(sub_class, execute);
}
extern "C" JNIEXPORT jobject JNICALL Java_Main_testGetMirandaMethodNative(JNIEnv* env, jclass) {
jclass abstract_class = env->FindClass("Main$testGetMirandaMethod_MirandaAbstract");
- assert(abstract_class != NULL);
+ assert(abstract_class != nullptr);
jmethodID miranda_method = env->GetMethodID(abstract_class, "inInterface", "()Z");
- assert(miranda_method != NULL);
+ assert(miranda_method != nullptr);
return env->ToReflectedMethod(abstract_class, miranda_method, JNI_FALSE);
}
@@ -191,7 +162,7 @@ extern "C" JNIEXPORT jobject JNICALL Java_Main_testGetMirandaMethodNative(JNIEnv
extern "C" void JNICALL Java_Main_testZeroLengthByteBuffers(JNIEnv* env, jclass) {
std::vector<uint8_t> buffer(1);
jobject byte_buffer = env->NewDirectByteBuffer(&buffer[0], 0);
- assert(byte_buffer != NULL);
+ assert(byte_buffer != nullptr);
assert(!env->ExceptionCheck());
assert(env->GetDirectBufferAddress(byte_buffer) == &buffer[0]);
@@ -202,8 +173,8 @@ constexpr size_t kByteReturnSize = 7;
jbyte byte_returns[kByteReturnSize] = { 0, 1, 2, 127, -1, -2, -128 };
extern "C" jbyte JNICALL Java_Main_byteMethod(JNIEnv* env, jclass klass, jbyte b1, jbyte b2,
- jbyte b3, jbyte b4, jbyte b5, jbyte b6,
- jbyte b7, jbyte b8, jbyte b9, jbyte b10) {
+ jbyte b3, jbyte b4, jbyte b5, jbyte b6,
+ jbyte b7, jbyte b8, jbyte b9, jbyte b10) {
// We use b1 to drive the output.
assert(b2 == 2);
assert(b3 == -3);
@@ -227,8 +198,8 @@ jshort short_returns[kShortReturnSize] = { 0, 1, 2, 127, 32767, -1, -2, -128,
// The weird static_cast is because short int is only guaranteed down to -32767, not Java's -32768.
extern "C" jshort JNICALL Java_Main_shortMethod(JNIEnv* env, jclass klass, jshort s1, jshort s2,
- jshort s3, jshort s4, jshort s5, jshort s6,
- jshort s7, jshort s8, jshort s9, jshort s10) {
+ jshort s3, jshort s4, jshort s5, jshort s6,
+ jshort s7, jshort s8, jshort s9, jshort s10) {
// We use s1 to drive the output.
assert(s2 == 2);
assert(s3 == -3);
@@ -247,9 +218,9 @@ extern "C" jshort JNICALL Java_Main_shortMethod(JNIEnv* env, jclass klass, jshor
}
extern "C" jboolean JNICALL Java_Main_booleanMethod(JNIEnv* env, jclass klass, jboolean b1,
- jboolean b2, jboolean b3, jboolean b4,
- jboolean b5, jboolean b6, jboolean b7,
- jboolean b8, jboolean b9, jboolean b10) {
+ jboolean b2, jboolean b3, jboolean b4,
+ jboolean b5, jboolean b6, jboolean b7,
+ jboolean b8, jboolean b9, jboolean b10) {
// We use b1 to drive the output.
assert(b2 == JNI_TRUE);
assert(b3 == JNI_FALSE);
@@ -269,8 +240,8 @@ constexpr size_t kCharReturnSize = 8;
jchar char_returns[kCharReturnSize] = { 0, 1, 2, 127, 255, 256, 15000, 34000 };
extern "C" jchar JNICALL Java_Main_charMethod(JNIEnv* env, jclass klacc, jchar c1, jchar c2,
- jchar c3, jchar c4, jchar c5, jchar c6,
- jchar c7, jchar c8, jchar c9, jchar c10) {
+ jchar c3, jchar c4, jchar c5, jchar c6, jchar c7,
+ jchar c8, jchar c9, jchar c10) {
// We use c1 to drive the output.
assert(c2 == 'a');
assert(c3 == 'b');
@@ -291,3 +262,94 @@ extern "C" JNIEXPORT jboolean JNICALL Java_Main_nativeIsAssignableFrom(JNIEnv* e
jclass from, jclass to) {
return env->IsAssignableFrom(from, to);
}
+
+static void testShallowGetCallingClassLoader(JNIEnv* env) {
+ // Test direct call.
+ {
+ jclass vmstack_clazz = env->FindClass("dalvik/system/VMStack");
+ assert(vmstack_clazz != nullptr);
+ assert(!env->ExceptionCheck());
+
+ jmethodID getCallingClassLoaderMethodId = env->GetStaticMethodID(vmstack_clazz,
+ "getCallingClassLoader",
+ "()Ljava/lang/ClassLoader;");
+ assert(getCallingClassLoaderMethodId != nullptr);
+ assert(!env->ExceptionCheck());
+
+ jobject class_loader = env->CallStaticObjectMethod(vmstack_clazz,
+ getCallingClassLoaderMethodId);
+ assert(class_loader == nullptr);
+ assert(!env->ExceptionCheck());
+ }
+
+ // Test one-level call. Use System.loadLibrary().
+ {
+ jclass system_clazz = env->FindClass("java/lang/System");
+ assert(system_clazz != nullptr);
+ assert(!env->ExceptionCheck());
+
+ jmethodID loadLibraryMethodId = env->GetStaticMethodID(system_clazz, "loadLibrary",
+ "(Ljava/lang/String;)V");
+ assert(loadLibraryMethodId != nullptr);
+ assert(!env->ExceptionCheck());
+
+ // Create a string object.
+ jobject library_string = env->NewStringUTF("arttest");
+ assert(library_string != nullptr);
+ assert(!env->ExceptionCheck());
+
+ env->CallStaticVoidMethod(system_clazz, loadLibraryMethodId, library_string);
+ if (env->ExceptionCheck()) {
+ // At most we expect UnsatisfiedLinkError.
+ jthrowable thrown = env->ExceptionOccurred();
+ env->ExceptionClear();
+
+ jclass unsatisfied_link_error_clazz = env->FindClass("java/lang/UnsatisfiedLinkError");
+ jclass thrown_class = env->GetObjectClass(thrown);
+ assert(env->IsSameObject(unsatisfied_link_error_clazz, thrown_class));
+ }
+ }
+}
+
+// http://b/16867274
+extern "C" JNIEXPORT void JNICALL Java_Main_nativeTestShallowGetCallingClassLoader(JNIEnv* env,
+ jclass) {
+ PthreadHelper(&testShallowGetCallingClassLoader);
+}
+
+static void testShallowGetStackClass2(JNIEnv* env) {
+ jclass vmstack_clazz = env->FindClass("dalvik/system/VMStack");
+ assert(vmstack_clazz != nullptr);
+ assert(!env->ExceptionCheck());
+
+ // Test direct call.
+ {
+ jmethodID getStackClass2MethodId = env->GetStaticMethodID(vmstack_clazz, "getStackClass2",
+ "()Ljava/lang/Class;");
+ assert(getStackClass2MethodId != nullptr);
+ assert(!env->ExceptionCheck());
+
+ jobject caller_class = env->CallStaticObjectMethod(vmstack_clazz, getStackClass2MethodId);
+ assert(caller_class == nullptr);
+ assert(!env->ExceptionCheck());
+ }
+
+ // Test one-level call. Use VMStack.getStackClass1().
+ {
+ jmethodID getStackClass1MethodId = env->GetStaticMethodID(vmstack_clazz, "getStackClass1",
+ "()Ljava/lang/Class;");
+ assert(getStackClass1MethodId != nullptr);
+ assert(!env->ExceptionCheck());
+
+ jobject caller_class = env->CallStaticObjectMethod(vmstack_clazz, getStackClass1MethodId);
+ assert(caller_class == nullptr);
+ assert(!env->ExceptionCheck());
+ }
+
+ // For better testing we would need to compile against libcore and have a two-deep stack
+ // ourselves.
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_nativeTestShallowGetStackClass2(JNIEnv* env, jclass) {
+ PthreadHelper(&testShallowGetStackClass2);
+}
diff --git a/test/004-JniTest/src/Main.java b/test/004-JniTest/src/Main.java
index ae133becbc..5884bc0e2b 100644
--- a/test/004-JniTest/src/Main.java
+++ b/test/004-JniTest/src/Main.java
@@ -30,6 +30,8 @@ public class Main {
testBooleanMethod();
testCharMethod();
testIsAssignableFromOnPrimitiveTypes();
+ testShallowGetCallingClassLoader();
+ testShallowGetStackClass2();
}
private static native void testFindClassOnAttachedNativeThread();
@@ -167,4 +169,16 @@ public class Main {
}
native static boolean nativeIsAssignableFrom(Class<?> from, Class<?> to);
+
+ static void testShallowGetCallingClassLoader() {
+ nativeTestShallowGetCallingClassLoader();
+ }
+
+ native static void nativeTestShallowGetCallingClassLoader();
+
+ static void testShallowGetStackClass2() {
+ nativeTestShallowGetStackClass2();
+ }
+
+ native static void nativeTestShallowGetStackClass2();
}
diff --git a/test/080-oom-throw/src/Main.java b/test/080-oom-throw/src/Main.java
index 035690faa4..c93f8bbc54 100644
--- a/test/080-oom-throw/src/Main.java
+++ b/test/080-oom-throw/src/Main.java
@@ -31,6 +31,7 @@ public class Main {
static class InstanceMemEater {
static boolean sawOome;
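+ // Statically reachable hook: confuseCompilerOptimization() stores the eater chain here so the
+ // allocations escape and cannot be optimized away; it is nulled afterwards so the chain can be
+ // collected.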
+ static InstanceMemEater hook;
InstanceMemEater next;
double d1, d2, d3, d4, d5, d6, d7, d8; // Bloat this object so we fill the heap faster.
@@ -45,6 +46,7 @@ public class Main {
}
static void confuseCompilerOptimization(InstanceMemEater instance) {
+ hook = instance;
}
}
@@ -61,6 +63,7 @@ public class Main {
lastMemEater = lastMemEater.next;
} while (lastMemEater != null);
memEater.confuseCompilerOptimization(memEater);
+ InstanceMemEater.hook = null;
return InstanceMemEater.sawOome;
}
diff --git a/test/115-native-bridge/nativebridge.cc b/test/115-native-bridge/nativebridge.cc
index 268f0bebce..3acc643d99 100644
--- a/test/115-native-bridge/nativebridge.cc
+++ b/test/115-native-bridge/nativebridge.cc
@@ -22,27 +22,9 @@
#include "jni.h"
#include "stdio.h"
-#include "string.h"
#include "unistd.h"
-#include "native_bridge.h"
-
-
-// Native bridge interfaces...
-
-struct NativeBridgeArtCallbacks {
- const char* (*getMethodShorty)(JNIEnv* env, jmethodID mid);
- uint32_t (*getNativeMethodCount)(JNIEnv* env, jclass clazz);
- uint32_t (*getNativeMethods)(JNIEnv* env, jclass clazz, JNINativeMethod* methods,
- uint32_t method_count);
-};
-
-struct NativeBridgeCallbacks {
- bool (*initialize)(NativeBridgeArtCallbacks* art_cbs);
- void* (*loadLibrary)(const char* libpath, int flag);
- void* (*getTrampoline)(void* handle, const char* name, const char* shorty, uint32_t len);
- bool (*isSupported)(const char* libpath);
-};
+#include "nativebridge/native_bridge.h"
struct NativeBridgeMethod {
const char* name;
@@ -53,7 +35,7 @@ struct NativeBridgeMethod {
};
static NativeBridgeMethod* find_native_bridge_method(const char *name);
-static NativeBridgeArtCallbacks* gNativeBridgeArtCallbacks;
+static const android::NativeBridgeRuntimeCallbacks* gNativeBridgeArtCallbacks;
static jint trampoline_JNI_OnLoad(JavaVM* vm, void* reserved) {
JNIEnv* env = nullptr;
@@ -225,7 +207,7 @@ static NativeBridgeMethod* find_native_bridge_method(const char *name) {
}
// NativeBridgeCallbacks implementations
-extern "C" bool native_bridge_initialize(NativeBridgeArtCallbacks* art_cbs) {
+extern "C" bool native_bridge_initialize(const android::NativeBridgeRuntimeCallbacks* art_cbs) {
if (art_cbs != nullptr) {
gNativeBridgeArtCallbacks = art_cbs;
printf("Native bridge initialized.\n");
@@ -281,7 +263,9 @@ extern "C" bool native_bridge_isSupported(const char* libpath) {
return strcmp(libpath, "libjavacore.so") != 0;
}
-NativeBridgeCallbacks NativeBridgeItf {
+// "NativeBridgeItf" is effectively an API (it is the name of the symbol that will be loaded
+// by the native bridge library).
+android::NativeBridgeCallbacks NativeBridgeItf {
.initialize = &native_bridge_initialize,
.loadLibrary = &native_bridge_loadLibrary,
.getTrampoline = &native_bridge_getTrampoline,
diff --git a/test/116-nodex2oat/run b/test/116-nodex2oat/run
index 5ffeecdbb1..2df670575d 100755
--- a/test/116-nodex2oat/run
+++ b/test/116-nodex2oat/run
@@ -17,6 +17,9 @@
# Remove prebuild from the flags, this test is for testing not having oat files.
flags="${@/--prebuild/}"
+# Use the non-prebuild script.
+RUN="${RUN/push-and-run-prebuilt-test-jar/push-and-run-test-jar}"
+
# Make sure we can run without an oat file.
echo "Run -Xnodex2oat"
${RUN} ${flags} --runtime-option -Xnodex2oat
diff --git a/test/117-nopatchoat/expected.txt b/test/117-nopatchoat/expected.txt
new file mode 100644
index 0000000000..a1293aed67
--- /dev/null
+++ b/test/117-nopatchoat/expected.txt
@@ -0,0 +1,9 @@
+Run without dex2oat/patchoat
+dex2oat & patchoat are disabled, has oat is true, has executable oat is false.
+This is a function call
+Run with dex2oat/patchoat
+dex2oat & patchoat are enabled, has oat is true, has executable oat is true.
+This is a function call
+Run default
+dex2oat & patchoat are enabled, has oat is true, has executable oat is true.
+This is a function call
diff --git a/test/117-nopatchoat/info.txt b/test/117-nopatchoat/info.txt
new file mode 100644
index 0000000000..aa9f57cb03
--- /dev/null
+++ b/test/117-nopatchoat/info.txt
@@ -0,0 +1 @@
+Test that disables patchoat'ing the application.
diff --git a/test/117-nopatchoat/nopatchoat.cc b/test/117-nopatchoat/nopatchoat.cc
new file mode 100644
index 0000000000..ced7f6ef4f
--- /dev/null
+++ b/test/117-nopatchoat/nopatchoat.cc
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "class_linker.h"
+#include "dex_file-inl.h"
+#include "mirror/class-inl.h"
+#include "scoped_thread_state_change.h"
+#include "thread.h"
+
+namespace art {
+
+class NoPatchoatTest {
+ public:
+ static bool hasExecutableOat(jclass cls) {
+ ScopedObjectAccess soa(Thread::Current());
+ mirror::Class* klass = soa.Decode<mirror::Class*>(cls);
+ const DexFile& dex_file = klass->GetDexFile();
+ const OatFile* oat_file =
+ Runtime::Current()->GetClassLinker()->FindOpenedOatFileForDexFile(dex_file);
+ return oat_file != nullptr && oat_file->IsExecutable();
+ }
+};
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_hasExecutableOat(JNIEnv*, jclass cls) {
+ return NoPatchoatTest::hasExecutableOat(cls);
+}
+
+} // namespace art
diff --git a/test/117-nopatchoat/run b/test/117-nopatchoat/run
new file mode 100755
index 0000000000..a7c96a0d30
--- /dev/null
+++ b/test/117-nopatchoat/run
@@ -0,0 +1,36 @@
+#!/bin/bash
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Ensure flags include prebuild and relocate. This test doesn't make sense unless we
+# have an oat file we want to relocate.
+# TODO: Unfortunately we have no way to force prebuild on for both host and target (or to skip if it is off).
+flags="${@/--relocate/}"
+flags="${flags/--no-relocate/}"
+flags="${flags} --relocate"
+
+# Make sure we can run without relocation
+echo "Run without dex2oat/patchoat"
+# /bin/false is actually not even present on either platform, so the exec will fail.
+# Unfortunately there is no equivalent to /bin/false on Android.
+${RUN} ${flags} --runtime-option -Xnodex2oat
+
+# Make sure we can run with the oat file.
+echo "Run with dexoat/patchoat"
+${RUN} ${flags} --runtime-option -Xdex2oat
+
+# Make sure we can run with the default settings.
+echo "Run default"
+${RUN} ${flags}
diff --git a/test/117-nopatchoat/src/Main.java b/test/117-nopatchoat/src/Main.java
new file mode 100644
index 0000000000..f3f91ce1a5
--- /dev/null
+++ b/test/117-nopatchoat/src/Main.java
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) {
+ System.out.println(
+ "dex2oat & patchoat are " + ((isDex2OatEnabled()) ? "enabled" : "disabled") +
+ ", has oat is " + hasOat() + ", has executable oat is " + hasExecutableOat() + ".");
+
+ if (!hasOat() && isDex2OatEnabled()) {
+ throw new Error("Application with dex2oat enabled runs without an oat file");
+ }
+
+ System.out.println(functionCall());
+ }
+
+ public static String functionCall() {
+ String arr[] = {"This", "is", "a", "function", "call"};
+ String ret = "";
+ for (int i = 0; i < arr.length; i++) {
+ ret = ret + arr[i] + " ";
+ }
+ return ret.substring(0, ret.length() - 1);
+ }
+
+ static {
+ System.loadLibrary("arttest");
+ }
+
+ private native static boolean isDex2OatEnabled();
+
+ private native static boolean hasOat();
+
+ private native static boolean hasExecutableOat();
+}
diff --git a/test/702-LargeBranchOffset/build b/test/702-LargeBranchOffset/build
new file mode 100644
index 0000000000..eacf730cb9
--- /dev/null
+++ b/test/702-LargeBranchOffset/build
@@ -0,0 +1,27 @@
+#!/bin/bash
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Stop if something fails.
+set -e
+
+# Generate Main.java by expanding the repetition macros with the C preprocessor.
+cpp -P src/Main.java.in src/Main.java
+
+mkdir classes
+${JAVAC} -d classes src/*.java
+
+${DX} --debug --dex --output=classes.dex classes
+zip $TEST_NAME.jar classes.dex
diff --git a/test/702-LargeBranchOffset/expected.txt b/test/702-LargeBranchOffset/expected.txt
new file mode 100644
index 0000000000..130678f291
--- /dev/null
+++ b/test/702-LargeBranchOffset/expected.txt
@@ -0,0 +1,5 @@
+0
+0
+2
+1
+512
diff --git a/test/702-LargeBranchOffset/info.txt b/test/702-LargeBranchOffset/info.txt
new file mode 100644
index 0000000000..747263ec4b
--- /dev/null
+++ b/test/702-LargeBranchOffset/info.txt
@@ -0,0 +1 @@
+Simple test to check if large branch offset works correctly.
diff --git a/test/702-LargeBranchOffset/src/Main.java.in b/test/702-LargeBranchOffset/src/Main.java.in
new file mode 100644
index 0000000000..270d766b58
--- /dev/null
+++ b/test/702-LargeBranchOffset/src/Main.java.in
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define DO_2_TIMES(x) x x
+#define DO_4_TIMES(x) DO_2_TIMES(DO_2_TIMES(x))
+#define DO_16_TIMES(x) DO_4_TIMES(DO_4_TIMES(x))
+#define DO_256_TIMES(x) DO_16_TIMES(DO_16_TIMES(x))
+#define DO_512_TIMES(x) DO_256_TIMES(DO_2_TIMES(x))
+
+
+public class Main {
+ public static void main(String[] args) {
+ Main m = new Main();
+ System.out.println(m.foo(-1, -1));
+ System.out.println(m.foo(-1, +1));
+ System.out.println(m.foo(+1, -1));
+ System.out.println(m.foo(+1, +1));
+ System.out.println(m.value);
+ }
+
+ public int foo(int a, int b) {
+ if ( a >= 0 ) {
+ if ( b < 0 ) {
+ DO_512_TIMES( synchronized(lock) { value++; } )
+ return 2;
+ }
+ return 1;
+ }
+ return 0;
+ }
+
+ Object lock = new Object();
+ int value = 0;
+}
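Each DO_N_TIMES macro above doubles or squares the number of copies of its body: DO_2_TIMES yields 2, DO_4_TIMES 2*2 = 4, DO_16_TIMES 4*4 = 16, DO_256_TIMES 16*16 = 256, and DO_512_TIMES 256*2 = 512. Hence foo(+1, -1) increments value 512 times, matching the last line of expected.txt, and the 512 expanded synchronized blocks make the if-body large enough to force a branch offset beyond the short encodings. The same doubling trick reduced to a runnable C++ sketch (the build script expands the Java template with cpp in just this way):

  #define DO_2_TIMES(x) x x
  #define DO_4_TIMES(x) DO_2_TIMES(DO_2_TIMES(x))

  int main() {
    int n = 0;
    DO_4_TIMES(n++;)  // Expands to: n++; n++; n++; n++;
    return n;         // 4
  }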
diff --git a/test/Android.libarttest.mk b/test/Android.libarttest.mk
index 3871b28ec1..caaf649241 100644
--- a/test/Android.libarttest.mk
+++ b/test/Android.libarttest.mk
@@ -24,7 +24,8 @@ LIBARTTEST_COMMON_SRC_FILES := \
004-ReferenceMap/stack_walk_refmap_jni.cc \
004-StackWalk/stack_walk_jni.cc \
004-UnsafeTest/unsafe_test.cc \
- 116-nodex2oat/nodex2oat.cc
+ 116-nodex2oat/nodex2oat.cc \
+ 117-nopatchoat/nopatchoat.cc
ART_TARGET_LIBARTTEST_$(ART_PHONY_TEST_TARGET_SUFFIX) += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libarttest.so
ifdef TARGET_2ND_ARCH
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 0cf9e16663..ce0eb3f625 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -124,6 +124,42 @@ ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,-tra
ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,-gcverify,-no-prebuild)
ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,-gcstress,-no-prebuild)
+# NB 116-nodex2oat is not broken per se; it just doesn't (and isn't meant to) work with --prebuild.
+# On the host this is patched around by changing a run flag, but we cannot do this on the target
+# due to a different run script.
+TEST_ART_BROKEN_TARGET_PREBUILD_RUN_TESTS := \
+ 116-nodex2oat
+
+ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_TARGET_PREBUILD_RUN_TESTS), $(call all-run-test-target-names,$(test),,-prebuild))
+ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_TARGET_PREBUILD_RUN_TESTS), $(call all-run-test-target-names,$(test),-trace,-prebuild))
+ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_TARGET_PREBUILD_RUN_TESTS), $(call all-run-test-target-names,$(test),-gcverify,-prebuild))
+ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_TARGET_PREBUILD_RUN_TESTS), $(call all-run-test-target-names,$(test),-gcstress,-prebuild))
+
+# NB 117-nopatchoat is not broken per se; it just doesn't work (and isn't meant to) without --prebuild --relocate.
+TEST_ART_BROKEN_RELOCATE_TESTS := \
+ 117-nopatchoat
+
+ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_RELOCATE_TESTS), $(call all-run-test-names,$(test),,-relocate))
+ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_RELOCATE_TESTS), $(call all-run-test-names,$(test),-trace,-relocate))
+ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_RELOCATE_TESTS), $(call all-run-test-names,$(test),-gcverify,-relocate))
+ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_RELOCATE_TESTS), $(call all-run-test-names,$(test),-gcstress,-relocate))
+
+TEST_ART_BROKEN_NORELOCATE_TESTS := \
+ 117-nopatchoat
+
+ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_NORELOCATE_TESTS), $(call all-run-test-names,$(test),,-norelocate))
+ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_NORELOCATE_TESTS), $(call all-run-test-names,$(test),-trace,-norelocate))
+ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_NORELOCATE_TESTS), $(call all-run-test-names,$(test),-gcverify,-norelocate))
+ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_NORELOCATE_TESTS), $(call all-run-test-names,$(test),-gcstress,-norelocate))
+
+TEST_ART_BROKEN_NO_PREBUILD_TESTS := \
+ 117-nopatchoat
+
+ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_NO_PREBUILD_TESTS), $(call all-run-test-names,$(test),,-no-prebuild))
+ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_NO_PREBUILD_TESTS), $(call all-run-test-names,$(test),-trace,-no-prebuild))
+ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_NO_PREBUILD_TESTS), $(call all-run-test-names,$(test),-gcverify,-no-prebuild))
+ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_NO_PREBUILD_TESTS), $(call all-run-test-names,$(test),-gcstress,-no-prebuild))
+
# The path where build only targets will be output, e.g.
# out/target/product/generic_x86_64/obj/PACKAGING/art-run-tests_intermediates/DATA
art_run_tests_dir := $(call intermediates-dir-for,PACKAGING,art-run-tests)/DATA
diff --git a/test/MyClassNatives/MyClassNatives.java b/test/MyClassNatives/MyClassNatives.java
index b5e0204ab8..fab153b624 100644
--- a/test/MyClassNatives/MyClassNatives.java
+++ b/test/MyClassNatives/MyClassNatives.java
@@ -80,16 +80,22 @@ class MyClassNatives {
Object o248, Object o249, Object o250, Object o251, Object o252, Object o253);
native void withoutImplementation();
-
+
native static void stackArgsIntsFirst(int i1, int i2, int i3, int i4, int i5, int i6, int i7,
int i8, int i9, int i10, float f1, float f2, float f3, float f4, float f5, float f6,
float f7, float f8, float f9, float f10);
-
+
native static void stackArgsFloatsFirst(float f1, float f2, float f3, float f4, float f5,
float f6, float f7, float f8, float f9, float f10, int i1, int i2, int i3, int i4, int i5,
int i6, int i7, int i8, int i9, int i10);
-
+
native static void stackArgsMixed(int i1, float f1, int i2, float f2, int i3, float f3, int i4,
float f4, int i5, float f5, int i6, float f6, int i7, float f7, int i8, float f8, int i9,
float f9, int i10, float f10);
+
+ static native double logD(double d);
+ static native float logF(float f);
+ static native boolean returnTrue();
+ static native boolean returnFalse();
+ static native int returnInt();
}
diff --git a/test/run-test b/test/run-test
index 496f7d1b53..6fac03b467 100755
--- a/test/run-test
+++ b/test/run-test
@@ -206,6 +206,15 @@ while true; do
break
fi
done
+
+# tmp_dir may be relative; resolve it.
+#
+# Cannot use realpath, as it does not exist on Mac.
+# Cannot use a simple "cd", as the path might not be created yet.
+# Use the -m option of readlink: it canonicalizes, but allows non-existing components.
+noncanonical_tmp_dir=$tmp_dir
+tmp_dir="`cd $oldwd ; readlink -m $tmp_dir`"
+
mkdir -p $tmp_dir
if [ "$basic_verify" = "true" ]; then
@@ -466,6 +475,8 @@ else
"./${run}" $run_args "$@" >"$output" 2>&1
else
cp "$build_output" "$output"
+ echo "Failed to build in tmpdir=${tmp_dir} from oldwd=${oldwd} and cwd=`pwd`"
+ echo "Non-canonical tmpdir was ${noncanonical_tmp_dir}"
echo "build exit status: $build_exit" >>"$output"
fi
./$check_cmd "$expected" "$output"