-rw-r--r--  compiler/Android.mk  2
-rw-r--r--  compiler/common_compiler_test.cc  5
-rw-r--r--  compiler/common_compiler_test.h  26
-rw-r--r--  compiler/dex/quick/quick_compiler.cc  6
-rw-r--r--  compiler/dex/verified_method.cc  5
-rw-r--r--  compiler/driver/compiler_driver-inl.h  4
-rw-r--r--  compiler/driver/compiler_driver.cc  2
-rw-r--r--  compiler/driver/compiler_driver_test.cc  4
-rw-r--r--  compiler/dwarf/dwarf_test.cc  6
-rw-r--r--  compiler/dwarf/headers.h  6
-rw-r--r--  compiler/elf_builder.h  15
-rw-r--r--  compiler/elf_writer_debug.cc  180
-rw-r--r--  compiler/image_test.cc  2
-rw-r--r--  compiler/image_writer.cc  31
-rw-r--r--  compiler/oat_test.cc  2
-rw-r--r--  compiler/oat_writer.cc  2
-rw-r--r--  compiler/optimizing/code_generator.cc  65
-rw-r--r--  compiler/optimizing/code_generator.h  22
-rw-r--r--  compiler/optimizing/code_generator_arm.cc  4
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc  22
-rw-r--r--  compiler/optimizing/code_generator_arm64.h  2
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc  4
-rw-r--r--  compiler/optimizing/code_generator_mips64.h  9
-rw-r--r--  compiler/optimizing/code_generator_x86.cc  998
-rw-r--r--  compiler/optimizing/code_generator_x86.h  59
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc  1016
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h  47
-rw-r--r--  compiler/optimizing/inliner.cc  2
-rw-r--r--  compiler/optimizing/intrinsics_x86.cc  74
-rw-r--r--  compiler/optimizing/intrinsics_x86_64.cc  75
-rw-r--r--  compiler/optimizing/locations.h  6
-rw-r--r--  compiler/optimizing/nodes.cc  10
-rw-r--r--  compiler/optimizing/nodes.h  7
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc  31
-rw-r--r--  compiler/optimizing/pc_relative_fixups_x86.cc (renamed from compiler/optimizing/constant_area_fixups_x86.cc)  32
-rw-r--r--  compiler/optimizing/pc_relative_fixups_x86.h (renamed from compiler/optimizing/constant_area_fixups_x86.h)  12
-rw-r--r--  compiler/optimizing/side_effects_test.cc  26
-rw-r--r--  dex2oat/dex2oat.cc  4
-rw-r--r--  dexdump/dexdump_main.cc  9
-rw-r--r--  imgdiag/imgdiag.cc  4
-rw-r--r--  imgdiag/imgdiag_test.cc  2
-rw-r--r--  oatdump/oatdump.cc  6
-rw-r--r--  patchoat/patchoat.cc  4
-rw-r--r--  runtime/arch/arm/entrypoints_init_arm.cc  3
-rw-r--r--  runtime/arch/arm64/entrypoints_init_arm64.cc  3
-rw-r--r--  runtime/arch/mips/entrypoints_direct_mips.h  3
-rw-r--r--  runtime/arch/mips/entrypoints_init_mips.cc  4
-rw-r--r--  runtime/arch/mips64/entrypoints_init_mips64.cc  3
-rw-r--r--  runtime/arch/stub_test.cc  35
-rw-r--r--  runtime/arch/x86/entrypoints_init_x86.cc  4
-rw-r--r--  runtime/arch/x86/quick_entrypoints_x86.S  18
-rw-r--r--  runtime/arch/x86_64/entrypoints_init_x86_64.cc  4
-rw-r--r--  runtime/arch/x86_64/quick_entrypoints_x86_64.S  11
-rw-r--r--  runtime/art_method.cc  16
-rw-r--r--  runtime/art_method.h  21
-rw-r--r--  runtime/asm_support.h  2
-rw-r--r--  runtime/base/arena_allocator.cc  12
-rw-r--r--  runtime/base/arena_allocator.h  5
-rw-r--r--  runtime/base/bit_utils.h  1
-rw-r--r--  runtime/base/scoped_arena_allocator.cc  5
-rw-r--r--  runtime/base/scoped_arena_allocator.h  7
-rw-r--r--  runtime/class_linker.cc  691
-rw-r--r--  runtime/class_linker.h  166
-rw-r--r--  runtime/common_runtime_test.h  7
-rw-r--r--  runtime/common_throws.cc  9
-rw-r--r--  runtime/common_throws.h  3
-rw-r--r--  runtime/dex_file.h  1
-rw-r--r--  runtime/entrypoints/quick/quick_entrypoints.h  17
-rw-r--r--  runtime/entrypoints/quick/quick_entrypoints_list.h  3
-rw-r--r--  runtime/entrypoints/quick/quick_field_entrypoints.cc  28
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc  4
-rw-r--r--  runtime/entrypoints_order_test.cc  4
-rw-r--r--  runtime/gc/accounting/mod_union_table.cc  2
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc  20
-rw-r--r--  runtime/gc/heap.cc  40
-rw-r--r--  runtime/gc/heap.h  13
-rw-r--r--  runtime/gc_root.h  2
-rw-r--r--  runtime/globals.h  14
-rw-r--r--  runtime/instrumentation.cc  6
-rw-r--r--  runtime/intern_table.cc  2
-rw-r--r--  runtime/interpreter/interpreter.cc  6
-rw-r--r--  runtime/interpreter/interpreter_common.h  13
-rw-r--r--  runtime/java_vm_ext.cc  49
-rw-r--r--  runtime/java_vm_ext.h  5
-rw-r--r--  runtime/jdwp/jdwp_handler.cc  11
-rw-r--r--  runtime/mirror/array.h  2
-rw-r--r--  runtime/mirror/class.cc  4
-rw-r--r--  runtime/mirror/class.h  2
-rw-r--r--  runtime/mirror/string.cc  6
-rw-r--r--  runtime/mirror/string.h  3
-rw-r--r--  runtime/modifiers.h  4
-rw-r--r--  runtime/monitor.cc  11
-rw-r--r--  runtime/native/dalvik_system_DexFile.cc  12
-rw-r--r--  runtime/native/dalvik_system_ZygoteHooks.cc  28
-rw-r--r--  runtime/oat_file_assistant.cc  10
-rw-r--r--  runtime/oat_file_assistant_test.cc  2
-rw-r--r--  runtime/oat_file_manager.cc  7
-rw-r--r--  runtime/os.h  4
-rw-r--r--  runtime/os_linux.cc  12
-rw-r--r--  runtime/reflection_test.cc  2
-rw-r--r--  runtime/runtime.cc  9
-rw-r--r--  runtime/runtime_options.def  7
-rw-r--r--  runtime/stack.cc  4
-rw-r--r--  runtime/thread.cc  9
-rw-r--r--  runtime/thread.h  2
-rw-r--r--  runtime/trace.cc  2
-rw-r--r--  test/061-out-of-memory/expected.txt  1
-rw-r--r--  test/061-out-of-memory/src/Main.java  18
-rw-r--r--  test/117-nopatchoat/nopatchoat.cc  2
-rw-r--r--  test/137-cfi/cfi.cc  2
-rw-r--r--  test/537-checker-jump-over-jump/src/Main.java  26
-rwxr-xr-x  test/964-default-iface-init-generated/util-src/generate_smali.py  12
-rwxr-xr-x  test/966-default-conflict/build  34
-rw-r--r--  test/966-default-conflict/build-src/Iface2.java  25
-rw-r--r--  test/966-default-conflict/expected.txt  18
-rw-r--r--  test/966-default-conflict/info.txt  6
-rwxr-xr-x  test/966-default-conflict/run  17
-rw-r--r--  test/966-default-conflict/smali/Iface.smali  39
-rw-r--r--  test/966-default-conflict/smali/Iface2.smali  31
-rw-r--r--  test/966-default-conflict/smali/Main.smali  227
-rwxr-xr-x  test/967-default-ame/build  35
-rw-r--r--  test/967-default-ame/build-src/Iface2.java  21
-rw-r--r--  test/967-default-ame/build-src/Iface3.java  19
-rw-r--r--  test/967-default-ame/expected.txt  18
-rw-r--r--  test/967-default-ame/info.txt  6
-rwxr-xr-x  test/967-default-ame/run  17
-rw-r--r--  test/967-default-ame/smali/Iface.smali  39
-rw-r--r--  test/967-default-ame/smali/Iface2.smali  27
-rw-r--r--  test/967-default-ame/smali/Iface3.smali  26
-rw-r--r--  test/967-default-ame/smali/Main.smali  228
-rwxr-xr-x  test/968-default-partial-compile-generated/build  50
-rw-r--r--  test/968-default-partial-compile-generated/expected.txt  1
-rw-r--r--  test/968-default-partial-compile-generated/info.txt  17
-rwxr-xr-x  test/968-default-partial-compile-generated/run  17
-rwxr-xr-x  test/968-default-partial-compile-generated/util-src/generate_java.py  134
-rwxr-xr-x  test/968-default-partial-compile-generated/util-src/generate_smali.py  607
-rw-r--r--  test/Android.run-test.mk  26
-rwxr-xr-x  test/dexdump/run-all-tests  2
-rwxr-xr-x  test/run-test  31
-rw-r--r--  test/utils/python/testgen/mixins.py  6
140 files changed, 4962 insertions, 1088 deletions
diff --git a/compiler/Android.mk b/compiler/Android.mk
index e74a68f608..42ddfd83ab 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -67,7 +67,6 @@ LIBART_COMPILER_SRC_FILES := \
optimizing/builder.cc \
optimizing/code_generator.cc \
optimizing/code_generator_utils.cc \
- optimizing/constant_area_fixups_x86.cc \
optimizing/constant_folding.cc \
optimizing/dead_code_elimination.cc \
optimizing/graph_checker.cc \
@@ -85,6 +84,7 @@ LIBART_COMPILER_SRC_FILES := \
optimizing/optimization.cc \
optimizing/optimizing_compiler.cc \
optimizing/parallel_move_resolver.cc \
+ optimizing/pc_relative_fixups_x86.cc \
optimizing/prepare_for_register_allocation.cc \
optimizing/primitive_type_propagation.cc \
optimizing/reference_type_propagation.cc \
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index c37cecaeac..e6cc50cc5e 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -223,6 +223,11 @@ void CommonCompilerTest::SetCompilerKind(Compiler::Kind compiler_kind) {
compiler_kind_ = compiler_kind;
}
+InstructionSet CommonCompilerTest::GetInstructionSet() const {
+ DCHECK(compiler_driver_.get() != nullptr);
+ return compiler_driver_->GetInstructionSet();
+}
+
void CommonCompilerTest::TearDown() {
timer_.reset();
compiler_driver_.reset();
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index 67b4428324..a121f8b7a0 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -61,6 +61,8 @@ class CommonCompilerTest : public CommonRuntimeTest {
Compiler::Kind GetCompilerKind() const;
void SetCompilerKind(Compiler::Kind compiler_kind);
+ InstructionSet GetInstructionSet() const;
+
// Get the set of image classes given to the compiler-driver in SetUp. Note: the compiler
// driver assumes ownership of the set, so the test should properly release the set.
virtual std::unordered_set<std::string>* GetImageClasses();
@@ -115,6 +117,30 @@ class CommonCompilerTest : public CommonRuntimeTest {
return; \
}
+// TODO: When read barrier works with all compilers in use, get rid of this.
+#define TEST_DISABLED_FOR_READ_BARRIER_WITH_QUICK() \
+ if (kUseReadBarrier && GetCompilerKind() == Compiler::kQuick) { \
+ printf("WARNING: TEST DISABLED FOR READ BARRIER WITH QUICK\n"); \
+ return; \
+ }
+
+// TODO: When read barrier works with all Optimizing back ends, get rid of this.
+#define TEST_DISABLED_FOR_READ_BARRIER_WITH_OPTIMIZING_FOR_UNSUPPORTED_INSTRUCTION_SETS() \
+ if (kUseReadBarrier && GetCompilerKind() == Compiler::kOptimizing) { \
+ switch (GetInstructionSet()) { \
+ case kX86: \
+ case kX86_64: \
+ /* Instruction set has read barrier support. */ \
+ break; \
+ \
+ default: \
+ /* Instruction set does not have read barrier support. */ \
+ printf("WARNING: TEST DISABLED FOR READ BARRIER WITH OPTIMIZING " \
+ "FOR THIS INSTRUCTION SET\n"); \
+ return; \
+ } \
+ }
+
// TODO: When non-PIC works with all compilers in use, get rid of this.
#define TEST_DISABLED_FOR_NON_PIC_COMPILING_WITH_OPTIMIZING() \
if (GetCompilerKind() == Compiler::kOptimizing) { \
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index 6673ea8ac5..05dde9f649 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -673,6 +673,12 @@ CompiledMethod* QuickCompiler::Compile(const DexFile::CodeItem* code_item,
return nullptr;
}
+ if (kEmitCompilerReadBarrier) {
+ VLOG(compiler) << "Skipping method : " << PrettyMethod(method_idx, dex_file)
+ << " Reason = Quick does not support read barrier.";
+ return nullptr;
+ }
+
// TODO: check method fingerprint here to determine appropriate backend type. Until then, use
// build default.
CompilerDriver* driver = GetCompilerDriver();
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index 8eb37cf3dc..0355f116f1 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -313,8 +313,9 @@ void VerifiedMethod::GenerateDevirtMap(verifier::MethodVerifier* method_verifier
concrete_method = reg_type.GetClass()->FindVirtualMethodForVirtual(
abstract_method, pointer_size);
}
- if (concrete_method == nullptr || concrete_method->IsAbstract()) {
- // In cases where concrete_method is not found, or is abstract, continue to the next invoke.
+ if (concrete_method == nullptr || !concrete_method->IsInvokable()) {
+ // In cases where concrete_method is not found, or is not invokable, continue to the next
+ // invoke.
continue;
}
if (reg_type.IsPreciseReference() || concrete_method->IsFinal() ||
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index 14ba81d193..10841e6700 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -329,7 +329,7 @@ inline int CompilerDriver::IsFastInvoke(
resolved_method->GetMethodIndex() < methods_class->GetVTableLength() &&
(methods_class->GetVTableEntry(
resolved_method->GetMethodIndex(), pointer_size) == resolved_method) &&
- !resolved_method->IsAbstract();
+ resolved_method->IsInvokable();
if (can_sharpen_virtual_based_on_type || can_sharpen_super_based_on_type) {
// Sharpen a virtual call into a direct call. The method_idx is into referrer's
@@ -374,7 +374,7 @@ inline int CompilerDriver::IsFastInvoke(
class_loader, nullptr, kVirtual);
}
CHECK(called_method != nullptr);
- CHECK(!called_method->IsAbstract());
+ CHECK(called_method->IsInvokable());
int stats_flags = kFlagMethodResolved;
GetCodeAndMethodForDirectCall(/*out*/invoke_type,
kDirect, // Sharp type
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index ecbc3a2cf7..bf3a8658da 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -1552,7 +1552,7 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType
*type = sharp_type;
}
} else {
- auto* image_space = heap->GetImageSpace();
+ auto* image_space = heap->GetBootImageSpace();
bool method_in_image = false;
if (image_space != nullptr) {
const auto& method_section = image_space->GetImageHeader().GetMethodsSection();
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index 1107599779..f8de9fa4a1 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -147,6 +147,8 @@ TEST_F(CompilerDriverTest, DISABLED_LARGE_CompileDexLibCore) {
TEST_F(CompilerDriverTest, AbstractMethodErrorStub) {
TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING_WITH_QUICK();
+ TEST_DISABLED_FOR_READ_BARRIER_WITH_QUICK();
+ TEST_DISABLED_FOR_READ_BARRIER_WITH_OPTIMIZING_FOR_UNSUPPORTED_INSTRUCTION_SETS();
jobject class_loader;
{
ScopedObjectAccess soa(Thread::Current());
@@ -193,6 +195,8 @@ class CompilerDriverMethodsTest : public CompilerDriverTest {
TEST_F(CompilerDriverMethodsTest, Selection) {
TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING_WITH_QUICK();
+ TEST_DISABLED_FOR_READ_BARRIER_WITH_QUICK();
+ TEST_DISABLED_FOR_READ_BARRIER_WITH_OPTIMIZING_FOR_UNSUPPORTED_INSTRUCTION_SETS();
Thread* self = Thread::Current();
jobject class_loader;
{
diff --git a/compiler/dwarf/dwarf_test.cc b/compiler/dwarf/dwarf_test.cc
index a412a99d98..6bb22eda2f 100644
--- a/compiler/dwarf/dwarf_test.cc
+++ b/compiler/dwarf/dwarf_test.cc
@@ -237,7 +237,7 @@ TEST_F(DwarfTest, DebugLine) {
std::vector<uintptr_t> debug_line_patches;
std::vector<uintptr_t> expected_patches { 87 }; // NOLINT
WriteDebugLineTable(include_directories, files, opcodes,
- &debug_line_data_, &debug_line_patches);
+ 0, &debug_line_data_, &debug_line_patches);
EXPECT_EQ(expected_patches, debug_line_patches);
CheckObjdumpOutput(is64bit, "-W");
@@ -276,7 +276,7 @@ TEST_F(DwarfTest, DebugLineSpecialOpcodes) {
std::vector<FileEntry> files { { "file.c", 0, 1000, 2000 } }; // NOLINT
std::vector<uintptr_t> debug_line_patches;
WriteDebugLineTable(directories, files, opcodes,
- &debug_line_data_, &debug_line_patches);
+ 0, &debug_line_data_, &debug_line_patches);
CheckObjdumpOutput(is64bit, "-W -WL");
}
@@ -332,7 +332,7 @@ TEST_F(DwarfTest, DebugInfo) {
std::vector<uintptr_t> debug_info_patches;
std::vector<uintptr_t> expected_patches { 16, 20, 29, 33, 42, 46 }; // NOLINT
dwarf::WriteDebugInfoCU(0 /* debug_abbrev_offset */, info,
- &debug_info_data_, &debug_info_patches);
+ 0, &debug_info_data_, &debug_info_patches);
EXPECT_EQ(expected_patches, debug_info_patches);
CheckObjdumpOutput(is64bit, "-W");
diff --git a/compiler/dwarf/headers.h b/compiler/dwarf/headers.h
index 883d756885..633e2f7d88 100644
--- a/compiler/dwarf/headers.h
+++ b/compiler/dwarf/headers.h
@@ -126,6 +126,7 @@ void WriteFDE(bool is64bit,
template<typename Vector>
void WriteDebugInfoCU(uint32_t debug_abbrev_offset,
const DebugInfoEntryWriter<Vector>& entries,
+ size_t debug_info_offset, // offset from start of .debug_info.
std::vector<uint8_t>* debug_info,
std::vector<uintptr_t>* debug_info_patches) {
static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
@@ -141,7 +142,7 @@ void WriteDebugInfoCU(uint32_t debug_abbrev_offset,
writer.UpdateUint32(start, writer.data()->size() - start - 4);
// Copy patch locations and make them relative to .debug_info section.
for (uintptr_t patch_location : entries.GetPatchLocations()) {
- debug_info_patches->push_back(entries_offset + patch_location);
+ debug_info_patches->push_back(debug_info_offset + entries_offset + patch_location);
}
}
@@ -157,6 +158,7 @@ template<typename Vector>
void WriteDebugLineTable(const std::vector<std::string>& include_directories,
const std::vector<FileEntry>& files,
const DebugLineOpCodeWriter<Vector>& opcodes,
+ size_t debug_line_offset, // offset from start of .debug_line.
std::vector<uint8_t>* debug_line,
std::vector<uintptr_t>* debug_line_patches) {
static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
@@ -197,7 +199,7 @@ void WriteDebugLineTable(const std::vector<std::string>& include_directories,
writer.UpdateUint32(header_start, writer.data()->size() - header_start - 4);
// Copy patch locations and make them relative to .debug_line section.
for (uintptr_t patch_location : opcodes.GetPatchLocations()) {
- debug_line_patches->push_back(opcodes_offset + patch_location);
+ debug_line_patches->push_back(debug_line_offset + opcodes_offset + patch_location);
}
}
diff --git a/compiler/elf_builder.h b/compiler/elf_builder.h
index 895dfccfe3..6e8dfd60fb 100644
--- a/compiler/elf_builder.h
+++ b/compiler/elf_builder.h
@@ -156,8 +156,13 @@ class ElfBuilder FINAL {
// Returns the size of the content of this section.
Elf_Word GetSize() const {
- CHECK(finished_);
- return header_.sh_size;
+ if (finished_) {
+ return header_.sh_size;
+ } else {
+ CHECK(started_);
+ CHECK_NE(header_.sh_type, (Elf_Word)SHT_NOBITS);
+ return owner_->Seek(0, kSeekCurrent) - header_.sh_offset;
+ }
}
// Set desired allocation size for .bss section.
@@ -281,6 +286,8 @@ class ElfBuilder FINAL {
strtab_(this, ".strtab", 0, kPageSize),
symtab_(this, ".symtab", SHT_SYMTAB, 0, &strtab_),
debug_frame_(this, ".debug_frame", SHT_PROGBITS, 0, nullptr, 0, sizeof(Elf_Addr), 0),
+ debug_info_(this, ".debug_info", SHT_PROGBITS, 0, nullptr, 0, 1, 0),
+ debug_line_(this, ".debug_line", SHT_PROGBITS, 0, nullptr, 0, 1, 0),
shstrtab_(this, ".shstrtab", 0, 1),
virtual_address_(0) {
text_.phdr_flags_ = PF_R | PF_X;
@@ -300,6 +307,8 @@ class ElfBuilder FINAL {
Section* GetEhFrame() { return &eh_frame_; }
Section* GetEhFrameHdr() { return &eh_frame_hdr_; }
Section* GetDebugFrame() { return &debug_frame_; }
+ Section* GetDebugInfo() { return &debug_info_; }
+ Section* GetDebugLine() { return &debug_line_; }
// Encode patch locations as LEB128 list of deltas between consecutive addresses.
// (exposed publicly for tests)
@@ -667,6 +676,8 @@ class ElfBuilder FINAL {
StringSection strtab_;
SymbolSection symtab_;
Section debug_frame_;
+ Section debug_info_;
+ Section debug_line_;
StringSection shstrtab_;
std::vector<std::unique_ptr<Section>> other_sections_;
diff --git a/compiler/elf_writer_debug.cc b/compiler/elf_writer_debug.cc
index 90db7eb41a..e1ab340b28 100644
--- a/compiler/elf_writer_debug.cc
+++ b/compiler/elf_writer_debug.cc
@@ -250,84 +250,98 @@ void WriteCFISection(ElfBuilder<ElfTypes>* builder,
}
template<typename ElfTypes>
-void WriteDebugSections(ElfBuilder<ElfTypes>* builder,
- const std::vector<OatWriter::DebugInfo>& method_infos) {
+class DebugInfoWriter {
typedef typename ElfTypes::Addr Elf_Addr;
- const bool is64bit = Is64BitInstructionSet(builder->GetIsa());
- Elf_Addr text_address = builder->GetText()->GetAddress();
- // Find all addresses (low_pc) which contain deduped methods.
- // The first instance of method is not marked deduped_, but the rest is.
- std::unordered_set<uint32_t> deduped_addresses;
- for (const OatWriter::DebugInfo& mi : method_infos) {
- if (mi.deduped_) {
- deduped_addresses.insert(mi.low_pc_);
- }
+ public:
+ explicit DebugInfoWriter(ElfBuilder<ElfTypes>* builder) : builder_(builder) {
}
- // Group the methods into compilation units based on source file.
- std::vector<std::vector<const OatWriter::DebugInfo*>> compilation_units;
- const char* last_source_file = nullptr;
- for (const OatWriter::DebugInfo& mi : method_infos) {
- // Attribute given instruction range only to single method.
- // Otherwise the debugger might get really confused.
- if (!mi.deduped_) {
- auto& dex_class_def = mi.dex_file_->GetClassDef(mi.class_def_index_);
- const char* source_file = mi.dex_file_->GetSourceFile(dex_class_def);
- if (compilation_units.empty() || source_file != last_source_file) {
- compilation_units.push_back(std::vector<const OatWriter::DebugInfo*>());
- }
- compilation_units.back().push_back(&mi);
- last_source_file = source_file;
- }
+ void Start() {
+ builder_->GetDebugInfo()->Start();
}
- // Write .debug_info section.
- std::vector<uint8_t> debug_info;
- std::vector<uintptr_t> debug_info_patches;
- std::vector<uint8_t> debug_abbrev;
- std::vector<uint8_t> debug_str;
- std::vector<uint8_t> debug_line;
- std::vector<uintptr_t> debug_line_patches;
- for (const auto& compilation_unit : compilation_units) {
+ void Write(const std::vector<const OatWriter::DebugInfo*>& method_infos,
+ size_t debug_line_offset) {
+ const bool is64bit = Is64BitInstructionSet(builder_->GetIsa());
+ const Elf_Addr text_address = builder_->GetText()->GetAddress();
uint32_t cunit_low_pc = 0xFFFFFFFFU;
uint32_t cunit_high_pc = 0;
- for (auto method_info : compilation_unit) {
+ for (auto method_info : method_infos) {
cunit_low_pc = std::min(cunit_low_pc, method_info->low_pc_);
cunit_high_pc = std::max(cunit_high_pc, method_info->high_pc_);
}
- size_t debug_abbrev_offset = debug_abbrev.size();
- DebugInfoEntryWriter<> info(is64bit, &debug_abbrev);
+ size_t debug_abbrev_offset = debug_abbrev_.size();
+ DebugInfoEntryWriter<> info(is64bit, &debug_abbrev_);
info.StartTag(DW_TAG_compile_unit, DW_CHILDREN_yes);
- info.WriteStrp(DW_AT_producer, "Android dex2oat", &debug_str);
+ info.WriteStrp(DW_AT_producer, "Android dex2oat", &debug_str_);
info.WriteData1(DW_AT_language, DW_LANG_Java);
info.WriteAddr(DW_AT_low_pc, text_address + cunit_low_pc);
info.WriteAddr(DW_AT_high_pc, text_address + cunit_high_pc);
- info.WriteData4(DW_AT_stmt_list, debug_line.size());
- for (auto method_info : compilation_unit) {
+ info.WriteData4(DW_AT_stmt_list, debug_line_offset);
+ for (auto method_info : method_infos) {
std::string method_name = PrettyMethod(method_info->dex_method_index_,
*method_info->dex_file_, true);
- if (deduped_addresses.find(method_info->low_pc_) != deduped_addresses.end()) {
- method_name += " [DEDUPED]";
- }
info.StartTag(DW_TAG_subprogram, DW_CHILDREN_no);
- info.WriteStrp(DW_AT_name, method_name.data(), &debug_str);
+ info.WriteStrp(DW_AT_name, method_name.data(), &debug_str_);
info.WriteAddr(DW_AT_low_pc, text_address + method_info->low_pc_);
info.WriteAddr(DW_AT_high_pc, text_address + method_info->high_pc_);
info.EndTag(); // DW_TAG_subprogram
}
info.EndTag(); // DW_TAG_compile_unit
- WriteDebugInfoCU(debug_abbrev_offset, info, &debug_info, &debug_info_patches);
+ std::vector<uint8_t> buffer;
+ buffer.reserve(info.data()->size() + KB);
+ size_t offset = builder_->GetDebugInfo()->GetSize();
+ WriteDebugInfoCU(debug_abbrev_offset, info, offset, &buffer, &debug_info_patches_);
+ builder_->GetDebugInfo()->WriteFully(buffer.data(), buffer.size());
+ }
+
+ void End() {
+ builder_->GetDebugInfo()->End();
+ builder_->WritePatches(".debug_info.oat_patches", &debug_info_patches_);
+ builder_->WriteSection(".debug_abbrev", &debug_abbrev_);
+ builder_->WriteSection(".debug_str", &debug_str_);
+ }
+
+ private:
+ ElfBuilder<ElfTypes>* builder_;
+ std::vector<uintptr_t> debug_info_patches_;
+ std::vector<uint8_t> debug_abbrev_;
+ std::vector<uint8_t> debug_str_;
+};
+
+template<typename ElfTypes>
+class DebugLineWriter {
+ typedef typename ElfTypes::Addr Elf_Addr;
+
+ public:
+ explicit DebugLineWriter(ElfBuilder<ElfTypes>* builder) : builder_(builder) {
+ }
+
+ void Start() {
+ builder_->GetDebugLine()->Start();
+ }
+
+ // Write line table for given set of methods.
+ // Returns the number of bytes written.
+ size_t Write(const std::vector<const OatWriter::DebugInfo*>& method_infos) {
+ const bool is64bit = Is64BitInstructionSet(builder_->GetIsa());
+ const Elf_Addr text_address = builder_->GetText()->GetAddress();
+ uint32_t cunit_low_pc = 0xFFFFFFFFU;
+ uint32_t cunit_high_pc = 0;
+ for (auto method_info : method_infos) {
+ cunit_low_pc = std::min(cunit_low_pc, method_info->low_pc_);
+ cunit_high_pc = std::max(cunit_high_pc, method_info->high_pc_);
+ }
- // Write .debug_line section.
std::vector<FileEntry> files;
std::unordered_map<std::string, size_t> files_map;
std::vector<std::string> directories;
std::unordered_map<std::string, size_t> directories_map;
int code_factor_bits_ = 0;
int dwarf_isa = -1;
- switch (builder->GetIsa()) {
+ switch (builder_->GetIsa()) {
case kArm: // arm actually means thumb2.
case kThumb2:
code_factor_bits_ = 1; // 16-bit instructions
@@ -348,7 +362,7 @@ void WriteDebugSections(ElfBuilder<ElfTypes>* builder,
if (dwarf_isa != -1) {
opcodes.SetISA(dwarf_isa);
}
- for (const OatWriter::DebugInfo* mi : compilation_unit) {
+ for (const OatWriter::DebugInfo* mi : method_infos) {
struct DebugInfoCallbacks {
static bool NewPosition(void* ctx, uint32_t address, uint32_t line) {
auto* context = reinterpret_cast<DebugInfoCallbacks*>(ctx);
@@ -449,14 +463,70 @@ void WriteDebugSections(ElfBuilder<ElfTypes>* builder,
}
opcodes.AdvancePC(text_address + cunit_high_pc);
opcodes.EndSequence();
- WriteDebugLineTable(directories, files, opcodes, &debug_line, &debug_line_patches);
+ std::vector<uint8_t> buffer;
+ buffer.reserve(opcodes.data()->size() + KB);
+ size_t offset = builder_->GetDebugLine()->GetSize();
+ WriteDebugLineTable(directories, files, opcodes, offset, &buffer, &debug_line_patches);
+ builder_->GetDebugLine()->WriteFully(buffer.data(), buffer.size());
+ return buffer.size();
+ }
+
+ void End() {
+ builder_->GetDebugLine()->End();
+ builder_->WritePatches(".debug_line.oat_patches", &debug_line_patches);
+ }
+
+ private:
+ ElfBuilder<ElfTypes>* builder_;
+ std::vector<uintptr_t> debug_line_patches;
+};
+
+template<typename ElfTypes>
+void WriteDebugSections(ElfBuilder<ElfTypes>* builder,
+ const std::vector<OatWriter::DebugInfo>& method_infos) {
+ struct CompilationUnit {
+ std::vector<const OatWriter::DebugInfo*> methods_;
+ size_t debug_line_offset_ = 0;
+ };
+
+ // Group the methods into compilation units based on source file.
+ std::vector<CompilationUnit> compilation_units;
+ const char* last_source_file = nullptr;
+ for (const OatWriter::DebugInfo& mi : method_infos) {
+ // Attribute given instruction range only to single method.
+ // Otherwise the debugger might get really confused.
+ if (!mi.deduped_) {
+ auto& dex_class_def = mi.dex_file_->GetClassDef(mi.class_def_index_);
+ const char* source_file = mi.dex_file_->GetSourceFile(dex_class_def);
+ if (compilation_units.empty() || source_file != last_source_file) {
+ compilation_units.push_back(CompilationUnit());
+ }
+ compilation_units.back().methods_.push_back(&mi);
+ last_source_file = source_file;
+ }
+ }
+
+ // Write .debug_line section.
+ {
+ DebugLineWriter<ElfTypes> line_writer(builder);
+ line_writer.Start();
+ size_t offset = 0;
+ for (auto& compilation_unit : compilation_units) {
+ compilation_unit.debug_line_offset_ = offset;
+ offset += line_writer.Write(compilation_unit.methods_);
+ }
+ line_writer.End();
+ }
+
+ // Write .debug_info section.
+ {
+ DebugInfoWriter<ElfTypes> info_writer(builder);
+ info_writer.Start();
+ for (const auto& compilation_unit : compilation_units) {
+ info_writer.Write(compilation_unit.methods_, compilation_unit.debug_line_offset_);
+ }
+ info_writer.End();
}
- builder->WriteSection(".debug_info", &debug_info);
- builder->WritePatches(".debug_info.oat_patches", &debug_info_patches);
- builder->WriteSection(".debug_abbrev", &debug_abbrev);
- builder->WriteSection(".debug_str", &debug_str);
- builder->WriteSection(".debug_line", &debug_line);
- builder->WritePatches(".debug_line.oat_patches", &debug_line_patches);
}
// Explicit instantiations
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index a38e1f54c0..6df15279a0 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -181,7 +181,7 @@ TEST_F(ImageTest, WriteRead) {
ASSERT_TRUE(heap->HasImageSpace());
ASSERT_TRUE(heap->GetNonMovingSpace()->IsMallocSpace());
- gc::space::ImageSpace* image_space = heap->GetImageSpace();
+ gc::space::ImageSpace* image_space = heap->GetBootImageSpace();
ASSERT_TRUE(image_space != nullptr);
ASSERT_LE(image_space->Size(), image_file_size);
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 7c1281fd77..3777015858 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -334,9 +334,9 @@ void ImageWriter::PrepareDexCacheArraySlots() {
Thread* const self = Thread::Current();
ReaderMutexLock mu(self, *class_linker->DexLock());
uint32_t size = 0u;
- for (jobject weak_root : class_linker->GetDexCaches()) {
+ for (const ClassLinker::DexCacheData& data : class_linker->GetDexCachesData()) {
mirror::DexCache* dex_cache =
- down_cast<mirror::DexCache*>(self->DecodeJObject(weak_root));
+ down_cast<mirror::DexCache*>(self->DecodeJObject(data.weak_root));
if (dex_cache == nullptr || IsInBootImage(dex_cache)) {
continue;
}
@@ -683,8 +683,8 @@ void ImageWriter::PruneNonImageClasses() {
ScopedAssertNoThreadSuspension sa(self, __FUNCTION__);
ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_); // For ClassInClassTable
ReaderMutexLock mu2(self, *class_linker->DexLock());
- for (jobject weak_root : class_linker->GetDexCaches()) {
- mirror::DexCache* dex_cache = down_cast<mirror::DexCache*>(self->DecodeJObject(weak_root));
+ for (const ClassLinker::DexCacheData& data : class_linker->GetDexCachesData()) {
+ mirror::DexCache* dex_cache = down_cast<mirror::DexCache*>(self->DecodeJObject(data.weak_root));
if (dex_cache == nullptr) {
continue;
}
@@ -806,8 +806,9 @@ ObjectArray<Object>* ImageWriter::CreateImageRoots() const {
{
ReaderMutexLock mu(self, *class_linker->DexLock());
// Count number of dex caches not in the boot image.
- for (jobject weak_root : class_linker->GetDexCaches()) {
- mirror::DexCache* dex_cache = down_cast<mirror::DexCache*>(self->DecodeJObject(weak_root));
+ for (const ClassLinker::DexCacheData& data : class_linker->GetDexCachesData()) {
+ mirror::DexCache* dex_cache =
+ down_cast<mirror::DexCache*>(self->DecodeJObject(data.weak_root));
dex_cache_count += IsInBootImage(dex_cache) ? 0u : 1u;
}
}
@@ -818,15 +819,17 @@ ObjectArray<Object>* ImageWriter::CreateImageRoots() const {
ReaderMutexLock mu(self, *class_linker->DexLock());
size_t non_image_dex_caches = 0;
// Re-count number of non image dex caches.
- for (jobject weak_root : class_linker->GetDexCaches()) {
- mirror::DexCache* dex_cache = down_cast<mirror::DexCache*>(self->DecodeJObject(weak_root));
+ for (const ClassLinker::DexCacheData& data : class_linker->GetDexCachesData()) {
+ mirror::DexCache* dex_cache =
+ down_cast<mirror::DexCache*>(self->DecodeJObject(data.weak_root));
non_image_dex_caches += IsInBootImage(dex_cache) ? 0u : 1u;
}
CHECK_EQ(dex_cache_count, non_image_dex_caches)
<< "The number of non-image dex caches changed.";
size_t i = 0;
- for (jobject weak_root : class_linker->GetDexCaches()) {
- mirror::DexCache* dex_cache = down_cast<mirror::DexCache*>(self->DecodeJObject(weak_root));
+ for (const ClassLinker::DexCacheData& data : class_linker->GetDexCachesData()) {
+ mirror::DexCache* dex_cache =
+ down_cast<mirror::DexCache*>(self->DecodeJObject(data.weak_root));
if (!IsInBootImage(dex_cache)) {
dex_caches->Set<false>(i, dex_cache);
++i;
@@ -1589,7 +1592,7 @@ const uint8_t* ImageWriter::GetOatAddress(OatAddress type) const {
// If we are compiling an app image, we need to use the stubs of the boot image.
if (compile_app_image_) {
// Use the current image pointers.
- gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetImageSpace();
+ gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetBootImageSpace();
DCHECK(image_space != nullptr);
const OatFile* oat_file = image_space->GetOatFile();
CHECK(oat_file != nullptr);
@@ -1621,7 +1624,7 @@ const uint8_t* ImageWriter::GetQuickCode(ArtMethod* method, bool* quick_is_inter
DCHECK(!method->IsResolutionMethod()) << PrettyMethod(method);
DCHECK(!method->IsImtConflictMethod()) << PrettyMethod(method);
DCHECK(!method->IsImtUnimplementedMethod()) << PrettyMethod(method);
- DCHECK(!method->IsAbstract()) << PrettyMethod(method);
+ DCHECK(method->IsInvokable()) << PrettyMethod(method);
DCHECK(!IsInBootImage(method)) << PrettyMethod(method);
// Use original code if it exists. Otherwise, set the code pointer to the resolution
@@ -1668,7 +1671,7 @@ const uint8_t* ImageWriter::GetQuickEntryPoint(ArtMethod* method) {
// We assume all methods have code. If they don't currently then we set them to the use the
// resolution trampoline. Abstract methods never have code and so we need to make sure their
// use results in an AbstractMethodError. We use the interpreter to achieve this.
- if (UNLIKELY(method->IsAbstract())) {
+ if (UNLIKELY(!method->IsInvokable())) {
return GetOatAddress(kOatAddressQuickToInterpreterBridge);
} else {
bool quick_is_interpreted;
@@ -1714,7 +1717,7 @@ void ImageWriter::CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy) {
// We assume all methods have code. If they don't currently then we set them to the use the
// resolution trampoline. Abstract methods never have code and so we need to make sure their
// use results in an AbstractMethodError. We use the interpreter to achieve this.
- if (UNLIKELY(orig->IsAbstract())) {
+ if (UNLIKELY(!orig->IsInvokable())) {
copy->SetEntryPointFromQuickCompiledCodePtrSize(
GetOatAddress(kOatAddressQuickToInterpreterBridge), target_ptr_size_);
} else {
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 16f641ab56..030451c1cb 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -232,7 +232,7 @@ TEST_F(OatTest, OatHeaderSizeCheck) {
EXPECT_EQ(72U, sizeof(OatHeader));
EXPECT_EQ(4U, sizeof(OatMethodOffsets));
EXPECT_EQ(28U, sizeof(OatQuickMethodHeader));
- EXPECT_EQ(113 * GetInstructionSetPointerSize(kRuntimeISA), sizeof(QuickEntryPoints));
+ EXPECT_EQ(114 * GetInstructionSetPointerSize(kRuntimeISA), sizeof(QuickEntryPoints));
}
TEST_F(OatTest, OatHeaderIsValid) {
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 3f2271ef11..40a3f14f93 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -899,7 +899,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
// NOTE: We're using linker patches for app->boot references when the image can
// be relocated and therefore we need to emit .oat_patches. We're not using this
// for app->app references, so check that the method is an image method.
- gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetImageSpace();
+ gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetBootImageSpace();
size_t method_offset = reinterpret_cast<const uint8_t*>(method) - image_space->Begin();
CHECK(image_space->GetImageHeader().GetMethodsSection().Contains(method_offset));
}
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index ce92470868..77d53fcd8f 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -208,6 +208,7 @@ class DisassemblyScope {
void CodeGenerator::GenerateSlowPaths() {
size_t code_start = 0;
for (SlowPathCode* slow_path : slow_paths_) {
+ current_slow_path_ = slow_path;
if (disasm_info_ != nullptr) {
code_start = GetAssembler()->CodeSize();
}
@@ -216,6 +217,7 @@ void CodeGenerator::GenerateSlowPaths() {
disasm_info_->AddSlowPathInterval(slow_path, code_start, GetAssembler()->CodeSize());
}
}
+ current_slow_path_ = nullptr;
}
void CodeGenerator::CompileInternal(CodeAllocator* allocator, bool is_baseline) {
@@ -308,7 +310,7 @@ size_t CodeGenerator::FindTwoFreeConsecutiveAlignedEntries(bool* array, size_t l
void CodeGenerator::InitializeCodeGeneration(size_t number_of_spill_slots,
size_t maximum_number_of_live_core_registers,
- size_t maximum_number_of_live_fp_registers,
+ size_t maximum_number_of_live_fpu_registers,
size_t number_of_out_slots,
const ArenaVector<HBasicBlock*>& block_order) {
block_order_ = &block_order;
@@ -322,14 +324,14 @@ void CodeGenerator::InitializeCodeGeneration(size_t number_of_spill_slots,
&& IsLeafMethod()
&& !RequiresCurrentMethod()) {
DCHECK_EQ(maximum_number_of_live_core_registers, 0u);
- DCHECK_EQ(maximum_number_of_live_fp_registers, 0u);
+ DCHECK_EQ(maximum_number_of_live_fpu_registers, 0u);
SetFrameSize(CallPushesPC() ? GetWordSize() : 0);
} else {
SetFrameSize(RoundUp(
number_of_spill_slots * kVRegSize
+ number_of_out_slots * kVRegSize
+ maximum_number_of_live_core_registers * GetWordSize()
- + maximum_number_of_live_fp_registers * GetFloatingPointSpillSlotSize()
+ + maximum_number_of_live_fpu_registers * GetFloatingPointSpillSlotSize()
+ FrameEntrySpillSize(),
kStackAlignment));
}
@@ -545,15 +547,19 @@ void CodeGenerator::GenerateUnresolvedFieldAccess(
}
}
+// TODO: Remove argument `code_generator_supports_read_barrier` when
+// all code generators have read barrier support.
void CodeGenerator::CreateLoadClassLocationSummary(HLoadClass* cls,
Location runtime_type_index_location,
- Location runtime_return_location) {
+ Location runtime_return_location,
+ bool code_generator_supports_read_barrier) {
ArenaAllocator* allocator = cls->GetBlock()->GetGraph()->GetArena();
LocationSummary::CallKind call_kind = cls->NeedsAccessCheck()
? LocationSummary::kCall
- : (cls->CanCallRuntime()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall);
+ : (((code_generator_supports_read_barrier && kEmitCompilerReadBarrier) ||
+ cls->CanCallRuntime())
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall);
LocationSummary* locations = new (allocator) LocationSummary(cls, call_kind);
if (cls->NeedsAccessCheck()) {
locations->SetInAt(0, Location::NoLocation());
@@ -1318,21 +1324,38 @@ void CodeGenerator::ValidateInvokeRuntime(HInstruction* instruction, SlowPathCod
// coherent with the runtime call generated, and that the GC side effect is
// set when required.
if (slow_path == nullptr) {
- DCHECK(instruction->GetLocations()->WillCall()) << instruction->DebugName();
+ DCHECK(instruction->GetLocations()->WillCall())
+ << "instruction->DebugName()=" << instruction->DebugName();
DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()))
- << instruction->DebugName() << instruction->GetSideEffects().ToString();
+ << "instruction->DebugName()=" << instruction->DebugName()
+ << " instruction->GetSideEffects().ToString()=" << instruction->GetSideEffects().ToString();
} else {
DCHECK(instruction->GetLocations()->OnlyCallsOnSlowPath() || slow_path->IsFatal())
- << instruction->DebugName() << slow_path->GetDescription();
+ << "instruction->DebugName()=" << instruction->DebugName()
+ << " slow_path->GetDescription()=" << slow_path->GetDescription();
DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()) ||
// Control flow would not come back into the code if a fatal slow
// path is taken, so we do not care if it triggers GC.
slow_path->IsFatal() ||
// HDeoptimize is a special case: we know we are not coming back from
// it into the code.
- instruction->IsDeoptimize())
- << instruction->DebugName() << instruction->GetSideEffects().ToString()
- << slow_path->GetDescription();
+ instruction->IsDeoptimize() ||
+ // When read barriers are enabled, some instructions use a
+ // slow path to emit a read barrier, which does not trigger
+ // GC, is not fatal, nor is emitted by HDeoptimize
+ // instructions.
+ (kEmitCompilerReadBarrier &&
+ (instruction->IsInstanceFieldGet() ||
+ instruction->IsStaticFieldGet() ||
+ instruction->IsArraySet() ||
+ instruction->IsArrayGet() ||
+ instruction->IsLoadClass() ||
+ instruction->IsLoadString() ||
+ instruction->IsInstanceOf() ||
+ instruction->IsCheckCast())))
+ << "instruction->DebugName()=" << instruction->DebugName()
+ << " instruction->GetSideEffects().ToString()=" << instruction->GetSideEffects().ToString()
+ << " slow_path->GetDescription()=" << slow_path->GetDescription();
}
// Check the coherency of leaf information.
@@ -1344,11 +1367,12 @@ void CodeGenerator::ValidateInvokeRuntime(HInstruction* instruction, SlowPathCod
}
void SlowPathCode::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
- RegisterSet* register_set = locations->GetLiveRegisters();
+ RegisterSet* live_registers = locations->GetLiveRegisters();
size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
+
for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) {
if (!codegen->IsCoreCalleeSaveRegister(i)) {
- if (register_set->ContainsCoreRegister(i)) {
+ if (live_registers->ContainsCoreRegister(i)) {
// If the register holds an object, update the stack mask.
if (locations->RegisterContainsObject(i)) {
locations->SetStackBit(stack_offset / kVRegSize);
@@ -1363,7 +1387,7 @@ void SlowPathCode::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* lo
for (size_t i = 0, e = codegen->GetNumberOfFloatingPointRegisters(); i < e; ++i) {
if (!codegen->IsFloatingPointCalleeSaveRegister(i)) {
- if (register_set->ContainsFloatingPointRegister(i)) {
+ if (live_registers->ContainsFloatingPointRegister(i)) {
DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
saved_fpu_stack_offsets_[i] = stack_offset;
@@ -1374,12 +1398,14 @@ void SlowPathCode::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* lo
}
void SlowPathCode::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
- RegisterSet* register_set = locations->GetLiveRegisters();
+ RegisterSet* live_registers = locations->GetLiveRegisters();
size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
+
for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) {
if (!codegen->IsCoreCalleeSaveRegister(i)) {
- if (register_set->ContainsCoreRegister(i)) {
+ if (live_registers->ContainsCoreRegister(i)) {
DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
+ DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
stack_offset += codegen->RestoreCoreRegister(stack_offset, i);
}
}
@@ -1387,8 +1413,9 @@ void SlowPathCode::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary*
for (size_t i = 0, e = codegen->GetNumberOfFloatingPointRegisters(); i < e; ++i) {
if (!codegen->IsFloatingPointCalleeSaveRegister(i)) {
- if (register_set->ContainsFloatingPointRegister(i)) {
+ if (live_registers->ContainsFloatingPointRegister(i)) {
DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
+ DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
stack_offset += codegen->RestoreFloatingPointRegister(stack_offset, i);
}
}
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index a92014dc79..114d97be94 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -201,7 +201,7 @@ class CodeGenerator {
virtual uintptr_t GetAddressOf(HBasicBlock* block) const = 0;
void InitializeCodeGeneration(size_t number_of_spill_slots,
size_t maximum_number_of_live_core_registers,
- size_t maximum_number_of_live_fp_registers,
+ size_t maximum_number_of_live_fpu_registers,
size_t number_of_out_slots,
const ArenaVector<HBasicBlock*>& block_order);
int32_t GetStackSlot(HLocal* local) const;
@@ -250,6 +250,15 @@ class CodeGenerator {
// Returns whether we should split long moves in parallel moves.
virtual bool ShouldSplitLongMoves() const { return false; }
+ size_t GetNumberOfCoreCalleeSaveRegisters() const {
+ return POPCOUNT(core_callee_save_mask_);
+ }
+
+ size_t GetNumberOfCoreCallerSaveRegisters() const {
+ DCHECK_GE(GetNumberOfCoreRegisters(), GetNumberOfCoreCalleeSaveRegisters());
+ return GetNumberOfCoreRegisters() - GetNumberOfCoreCalleeSaveRegisters();
+ }
+
bool IsCoreCalleeSaveRegister(int reg) const {
return (core_callee_save_mask_ & (1 << reg)) != 0;
}
@@ -416,7 +425,8 @@ class CodeGenerator {
// TODO: This overlaps a bit with MoveFromReturnRegister. Refactor for a better design.
static void CreateLoadClassLocationSummary(HLoadClass* cls,
Location runtime_type_index_location,
- Location runtime_return_location);
+ Location runtime_return_location,
+ bool code_generator_supports_read_barrier = false);
static void CreateSystemArrayCopyLocationSummary(HInvoke* invoke);
@@ -490,6 +500,7 @@ class CodeGenerator {
compiler_options_(compiler_options),
src_map_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
slow_paths_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ current_slow_path_(nullptr),
current_block_index_(0),
is_leaf_(true),
requires_current_method_(false) {
@@ -557,6 +568,10 @@ class CodeGenerator {
return raw_pointer_to_labels_array + block->GetBlockId();
}
+ SlowPathCode* GetCurrentSlowPath() {
+ return current_slow_path_;
+ }
+
// Frame size required for this method.
uint32_t frame_size_;
uint32_t core_spill_mask_;
@@ -605,6 +620,9 @@ class CodeGenerator {
ArenaVector<SrcMapElem> src_map_;
ArenaVector<SlowPathCode*> slow_paths_;
+ // The current slow path that we're generating code for.
+ SlowPathCode* current_slow_path_;
+
// The current block index in `block_order_` of the block
// we are generating code for.
size_t current_block_index_;
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index e30aa6e51c..54c6cc8890 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -2694,7 +2694,7 @@ void LocationsBuilderARM::VisitDiv(HDiv* div) {
case Primitive::kPrimInt: {
if (div->InputAt(1)->IsConstant()) {
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrConstant(div->InputAt(1)));
+ locations->SetInAt(1, Location::ConstantLocation(div->InputAt(1)->AsConstant()));
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
int32_t abs_imm = std::abs(div->InputAt(1)->AsIntConstant()->GetValue());
if (abs_imm <= 1) {
@@ -2818,7 +2818,7 @@ void LocationsBuilderARM::VisitRem(HRem* rem) {
case Primitive::kPrimInt: {
if (rem->InputAt(1)->IsConstant()) {
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrConstant(rem->InputAt(1)));
+ locations->SetInAt(1, Location::ConstantLocation(rem->InputAt(1)->AsConstant()));
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
int32_t abs_imm = std::abs(rem->InputAt(1)->AsIntConstant()->GetValue());
if (abs_imm <= 1) {
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index b0be446174..7e248b402a 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -598,7 +598,7 @@ CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph,
call_patches_(MethodReferenceComparator(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_rel_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+ pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
// Save the link register (containing the return address) to mimic Quick.
AddAllocatedRegister(LocationFrom(lr));
}
@@ -2872,21 +2872,21 @@ void CodeGeneratorARM64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invok
break;
case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative: {
// Add ADRP with its PC-relative DexCache access patch.
- pc_rel_dex_cache_patches_.emplace_back(*invoke->GetTargetMethod().dex_file,
- invoke->GetDexCacheArrayOffset());
- vixl::Label* pc_insn_label = &pc_rel_dex_cache_patches_.back().label;
+ pc_relative_dex_cache_patches_.emplace_back(*invoke->GetTargetMethod().dex_file,
+ invoke->GetDexCacheArrayOffset());
+ vixl::Label* pc_insn_label = &pc_relative_dex_cache_patches_.back().label;
{
vixl::SingleEmissionCheckScope guard(GetVIXLAssembler());
__ adrp(XRegisterFrom(temp).X(), 0);
}
__ Bind(pc_insn_label); // Bind after ADRP.
- pc_rel_dex_cache_patches_.back().pc_insn_label = pc_insn_label;
+ pc_relative_dex_cache_patches_.back().pc_insn_label = pc_insn_label;
// Add LDR with its PC-relative DexCache access patch.
- pc_rel_dex_cache_patches_.emplace_back(*invoke->GetTargetMethod().dex_file,
- invoke->GetDexCacheArrayOffset());
+ pc_relative_dex_cache_patches_.emplace_back(*invoke->GetTargetMethod().dex_file,
+ invoke->GetDexCacheArrayOffset());
__ Ldr(XRegisterFrom(temp).X(), MemOperand(XRegisterFrom(temp).X(), 0));
- __ Bind(&pc_rel_dex_cache_patches_.back().label); // Bind after LDR.
- pc_rel_dex_cache_patches_.back().pc_insn_label = pc_insn_label;
+ __ Bind(&pc_relative_dex_cache_patches_.back().label); // Bind after LDR.
+ pc_relative_dex_cache_patches_.back().pc_insn_label = pc_insn_label;
break;
}
case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
@@ -2973,7 +2973,7 @@ void CodeGeneratorARM64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patc
method_patches_.size() +
call_patches_.size() +
relative_call_patches_.size() +
- pc_rel_dex_cache_patches_.size();
+ pc_relative_dex_cache_patches_.size();
linker_patches->reserve(size);
for (const auto& entry : method_patches_) {
const MethodReference& target_method = entry.first;
@@ -2994,7 +2994,7 @@ void CodeGeneratorARM64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patc
info.target_method.dex_file,
info.target_method.dex_method_index));
}
- for (const PcRelativeDexCacheAccessInfo& info : pc_rel_dex_cache_patches_) {
+ for (const PcRelativeDexCacheAccessInfo& info : pc_relative_dex_cache_patches_) {
linker_patches->push_back(LinkerPatch::DexCacheArrayPatch(info.label.location() - 4u,
&info.target_dex_file,
info.pc_insn_label->location() - 4u,
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index ab684ea538..aa5ad386e1 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -447,7 +447,7 @@ class CodeGeneratorARM64 : public CodeGenerator {
// Using ArenaDeque<> which retains element addresses on push/emplace_back().
ArenaDeque<MethodPatchInfo<vixl::Label>> relative_call_patches_;
// PC-relative DexCache access info.
- ArenaDeque<PcRelativeDexCacheAccessInfo> pc_rel_dex_cache_patches_;
+ ArenaDeque<PcRelativeDexCacheAccessInfo> pc_relative_dex_cache_patches_;
DISALLOW_COPY_AND_ASSIGN(CodeGeneratorARM64);
};
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index b36a04216d..851bced09a 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -420,7 +420,7 @@ CodeGeneratorMIPS64::CodeGeneratorMIPS64(HGraph* graph,
: CodeGenerator(graph,
kNumberOfGpuRegisters,
kNumberOfFpuRegisters,
- 0, // kNumberOfRegisterPairs
+ /* number_of_register_pairs */ 0,
ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
arraysize(kCoreCalleeSaves)),
ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
@@ -2977,7 +2977,7 @@ void LocationsBuilderMIPS64::VisitLoadClass(HLoadClass* cls) {
CodeGenerator::CreateLoadClassLocationSummary(
cls,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
- Location::RegisterLocation(A0));
+ calling_convention.GetReturnLocation(cls->GetType()));
}
void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) {
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 58c6e0fa83..ac3162f736 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -119,9 +119,12 @@ class FieldAccessCallingConventionMIPS64 : public FieldAccessCallingConvention {
Location GetReturnLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
return Location::RegisterLocation(V0);
}
- Location GetSetValueLocation(
- Primitive::Type type ATTRIBUTE_UNUSED, bool is_instance) const OVERRIDE {
- return is_instance ? Location::RegisterLocation(A2) : Location::RegisterLocation(A1);
+ Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
+ return Primitive::Is64BitType(type)
+ ? Location::RegisterLocation(A2)
+ : (is_instance
+ ? Location::RegisterLocation(A2)
+ : Location::RegisterLocation(A1));
}
Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
return Location::FpuRegisterLocation(F0);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 8308d9ee20..32dc636d1d 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -19,7 +19,6 @@
#include "art_method.h"
#include "code_generator_utils.h"
#include "compiled_method.h"
-#include "constant_area_fixups_x86.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "gc/accounting/card_table.h"
@@ -27,6 +26,7 @@
#include "intrinsics_x86.h"
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
+#include "pc_relative_fixups_x86.h"
#include "thread.h"
#include "utils/assembler.h"
#include "utils/stack_checks.h"
@@ -35,6 +35,9 @@
namespace art {
+template<class MirrorType>
+class GcRoot;
+
namespace x86 {
static constexpr int kCurrentMethodStackOffset = 0;
@@ -300,15 +303,6 @@ class TypeCheckSlowPathX86 : public SlowPathCode {
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
- if (instruction_->IsCheckCast()) {
- // The codegen for the instruction overwrites `temp`, so put it back in place.
- Register obj = locations->InAt(0).AsRegister<Register>();
- Register temp = locations->GetTemp(0).AsRegister<Register>();
- uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
- __ movl(temp, Address(obj, class_offset));
- __ MaybeUnpoisonHeapReference(temp);
- }
-
if (!is_fatal_) {
SaveLiveRegisters(codegen, locations);
}
@@ -329,12 +323,15 @@ class TypeCheckSlowPathX86 : public SlowPathCode {
instruction_,
instruction_->GetDexPc(),
this);
+ CheckEntrypointTypes<
+ kQuickInstanceofNonTrivial, uint32_t, const mirror::Class*, const mirror::Class*>();
} else {
DCHECK(instruction_->IsCheckCast());
x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
instruction_,
instruction_->GetDexPc(),
this);
+ CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
}
if (!is_fatal_) {
@@ -425,6 +422,221 @@ class ArraySetSlowPathX86 : public SlowPathCode {
DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathX86);
};
+// Slow path generating a read barrier for a heap reference.
+class ReadBarrierForHeapReferenceSlowPathX86 : public SlowPathCode {
+ public:
+ ReadBarrierForHeapReferenceSlowPathX86(HInstruction* instruction,
+ Location out,
+ Location ref,
+ Location obj,
+ uint32_t offset,
+ Location index)
+ : instruction_(instruction),
+ out_(out),
+ ref_(ref),
+ obj_(obj),
+ offset_(offset),
+ index_(index) {
+ DCHECK(kEmitCompilerReadBarrier);
+ // If `obj` is equal to `out` or `ref`, it means the initial object
+ // has been overwritten by (or after) the heap object reference load
+ // to be instrumented, e.g.:
+ //
+ // __ movl(out, Address(out, offset));
+ // codegen_->GenerateReadBarrier(instruction, out_loc, out_loc, out_loc, offset);
+ //
+ // In that case, we have lost the information about the original
+ // object, and the emitted read barrier cannot work properly.
+ DCHECK(!obj.Equals(out)) << "obj=" << obj << " out=" << out;
+ DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref;
+ }
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
+ LocationSummary* locations = instruction_->GetLocations();
+ Register reg_out = out_.AsRegister<Register>();
+ DCHECK(locations->CanCall());
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out));
+ DCHECK(!instruction_->IsInvoke() ||
+ (instruction_->IsInvokeStaticOrDirect() &&
+ instruction_->GetLocations()->Intrinsified()));
+
+ __ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, locations);
+
+ // We may have to change the index's value, but as `index_` is a
+ // constant member (like other "inputs" of this slow path),
+ // introduce a copy of it, `index`.
+ Location index = index_;
+ if (index_.IsValid()) {
+ // Handle `index_` for HArrayGet and intrinsic UnsafeGetObject.
+ if (instruction_->IsArrayGet()) {
+ // Compute the actual memory offset and store it in `index`.
+ Register index_reg = index_.AsRegister<Register>();
+ DCHECK(locations->GetLiveRegisters()->ContainsCoreRegister(index_reg));
+ if (codegen->IsCoreCalleeSaveRegister(index_reg)) {
+ // We are about to change the value of `index_reg` (see the
+ // calls to art::x86::X86Assembler::shll and
+ // art::x86::X86Assembler::AddImmediate below), but it has
+ // not been saved by the previous call to
+ // art::SlowPathCode::SaveLiveRegisters, as it is a
+ // callee-save register --
+ // art::SlowPathCode::SaveLiveRegisters does not consider
+ // callee-save registers, as it has been designed with the
+ // assumption that callee-save registers are supposed to be
+ // handled by the called function. So, as a callee-save
+ // register, `index_reg` _would_ eventually be saved onto
+ // the stack, but it would be too late: we would have
+ // changed its value earlier. Therefore, we manually save
+ // it here into another freely available register,
+ // `free_reg`, chosen of course among the caller-save
+ // registers (as a callee-save `free_reg` register would
+ // exhibit the same problem).
+ //
+ // Note we could have requested a temporary register from
+ // the register allocator instead; but we prefer not to, as
+ // this is a slow path, and we know we can find a
+ // caller-save register that is available.
+ Register free_reg = FindAvailableCallerSaveRegister(codegen);
+ __ movl(free_reg, index_reg);
+ index_reg = free_reg;
+ index = Location::RegisterLocation(index_reg);
+ } else {
+ // The initial register stored in `index_` has already been
+ // saved in the call to art::SlowPathCode::SaveLiveRegisters
+ // (as it is not a callee-save register), so we can freely
+ // use it.
+ }
+ // Shifting the index value contained in `index_reg` by the scale
+ // factor (2) cannot overflow in practice, as the runtime is
+ // unable to allocate object arrays with a size larger than
+ // 2^26 - 1 (that is, 2^28 - 4 bytes).
+ __ shll(index_reg, Immediate(TIMES_4));
+ static_assert(
+ sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
+ "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
+ __ AddImmediate(index_reg, Immediate(offset_));
+ } else {
+ DCHECK(instruction_->IsInvoke());
+ DCHECK(instruction_->GetLocations()->Intrinsified());
+ DCHECK((instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObject) ||
+ (instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile))
+ << instruction_->AsInvoke()->GetIntrinsic();
+ DCHECK_EQ(offset_, 0U);
+ DCHECK(index_.IsRegisterPair());
+ // UnsafeGet's offset location is a register pair; its low part
+ // contains the correct offset.
+ index = index_.ToLow();
+ }
+ }
+
+ // We're moving two or three locations to locations that could
+ // overlap, so we need a parallel move resolver.
+ InvokeRuntimeCallingConvention calling_convention;
+ HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ parallel_move.AddMove(ref_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ Primitive::kPrimNot,
+ nullptr);
+ parallel_move.AddMove(obj_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+ Primitive::kPrimNot,
+ nullptr);
+ if (index.IsValid()) {
+ parallel_move.AddMove(index,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(2)),
+ Primitive::kPrimInt,
+ nullptr);
+ codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+ } else {
+ codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
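+ // The offset is a compile-time constant, so it can simply be
+ // loaded into the third argument register after the moves above.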
+ __ movl(calling_convention.GetRegisterAt(2), Immediate(offset_));
+ }
+ x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pReadBarrierSlow),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
+ CheckEntrypointTypes<
+ kQuickReadBarrierSlow, mirror::Object*, mirror::Object*, mirror::Object*, uint32_t>();
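+ // The entry point returns the resulting reference in EAX; move it to the output location.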
+ x86_codegen->Move32(out_, Location::RegisterLocation(EAX));
+
+ RestoreLiveRegisters(codegen, locations);
+ __ jmp(GetExitLabel());
+ }
+
+ const char* GetDescription() const OVERRIDE { return "ReadBarrierForHeapReferenceSlowPathX86"; }
+
+ private:
+ Register FindAvailableCallerSaveRegister(CodeGenerator* codegen) {
+ size_t ref = static_cast<size_t>(ref_.AsRegister<Register>());
+ size_t obj = static_cast<size_t>(obj_.AsRegister<Register>());
+ for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) {
+ if (i != ref && i != obj && !codegen->IsCoreCalleeSaveRegister(i)) {
+ return static_cast<Register>(i);
+ }
+ }
+ // We shall never fail to find a free caller-save register, as
+ // there are more than two core caller-save registers on x86
+ // (meaning it is possible to find one which is different from
+ // `ref` and `obj`).
+ DCHECK_GT(codegen->GetNumberOfCoreCallerSaveRegisters(), 2u);
+ LOG(FATAL) << "Could not find a free caller-save register";
+ UNREACHABLE();
+ }
+
+ HInstruction* const instruction_;
+ const Location out_;
+ const Location ref_;
+ const Location obj_;
+ const uint32_t offset_;
+ // An additional location containing an index to an array.
+ // Only used for HArrayGet and the UnsafeGetObject &
+ // UnsafeGetObjectVolatile intrinsics.
+ const Location index_;
+
+ DISALLOW_COPY_AND_ASSIGN(ReadBarrierForHeapReferenceSlowPathX86);
+};
+
+// Slow path generating a read barrier for a GC root.
+class ReadBarrierForRootSlowPathX86 : public SlowPathCode {
+ public:
+ ReadBarrierForRootSlowPathX86(HInstruction* instruction, Location out, Location root)
+ : instruction_(instruction), out_(out), root_(root) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ Register reg_out = out_.AsRegister<Register>();
+ DCHECK(locations->CanCall());
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out));
+ DCHECK(instruction_->IsLoadClass() || instruction_->IsLoadString());
+
+ __ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, locations);
+
+ InvokeRuntimeCallingConvention calling_convention;
+ CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
+ x86_codegen->Move32(Location::RegisterLocation(calling_convention.GetRegisterAt(0)), root_);
+ x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pReadBarrierForRootSlow),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
+ CheckEntrypointTypes<kQuickReadBarrierForRootSlow, mirror::Object*, GcRoot<mirror::Object>*>();
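+ // The entry point returns the updated root in EAX; move it to the output location.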
+ x86_codegen->Move32(out_, Location::RegisterLocation(EAX));
+
+ RestoreLiveRegisters(codegen, locations);
+ __ jmp(GetExitLabel());
+ }
+
+ const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathX86"; }
+
+ private:
+ HInstruction* const instruction_;
+ const Location out_;
+ const Location root_;
+
+ DISALLOW_COPY_AND_ASSIGN(ReadBarrierForRootSlowPathX86);
+};
+
#undef __
#define __ down_cast<X86Assembler*>(GetAssembler())->
@@ -513,9 +725,9 @@ void CodeGeneratorX86::InvokeRuntime(int32_t entry_point_offset,
}
CodeGeneratorX86::CodeGeneratorX86(HGraph* graph,
- const X86InstructionSetFeatures& isa_features,
- const CompilerOptions& compiler_options,
- OptimizingCompilerStats* stats)
+ const X86InstructionSetFeatures& isa_features,
+ const CompilerOptions& compiler_options,
+ OptimizingCompilerStats* stats)
: CodeGenerator(graph,
kNumberOfCpuRegisters,
kNumberOfXmmRegisters,
@@ -533,6 +745,7 @@ CodeGeneratorX86::CodeGeneratorX86(HGraph* graph,
isa_features_(isa_features),
method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
fixups_to_jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
// Use a fake return address register to mimic Quick.
AddAllocatedRegister(Location::RegisterLocation(kFakeReturnRegister));
@@ -581,7 +794,7 @@ Location CodeGeneratorX86::AllocateFreeRegister(Primitive::Type type) const {
LOG(FATAL) << "Unreachable type " << type;
}
- return Location();
+ return Location::NoLocation();
}
void CodeGeneratorX86::SetupBlockedRegisters(bool is_baseline) const {
@@ -782,7 +995,7 @@ Location InvokeDexCallingConventionVisitorX86::GetNextLocation(Primitive::Type t
LOG(FATAL) << "Unexpected parameter type " << type;
break;
}
- return Location();
+ return Location::NoLocation();
}
void CodeGeneratorX86::Move32(Location destination, Location source) {
@@ -1696,11 +1909,20 @@ void LocationsBuilderX86::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invok
IntrinsicLocationsBuilderX86 intrinsic(codegen_);
if (intrinsic.TryDispatch(invoke)) {
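+ // For intrinsified invokes that may call on a slow path, allow the PC-relative base to be
+ // in any location; GetInvokeStaticOrDirectExtraParameter reloads it from the stack if it
+ // was spilled.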
+ if (invoke->GetLocations()->CanCall() && invoke->HasPcRelativeDexCache()) {
+ invoke->GetLocations()->SetInAt(invoke->GetCurrentMethodInputIndex(), Location::Any());
+ }
return;
}
HandleInvoke(invoke);
+ // For the PC-relative dex cache access, the invoke has an extra input: the PC-relative address base.
+ if (invoke->HasPcRelativeDexCache()) {
+ invoke->GetLocations()->SetInAt(invoke->GetCurrentMethodInputIndex(),
+ Location::RequiresRegister());
+ }
+
if (codegen_->IsBaseline()) {
// Baseline does not have enough registers if the current method also
// needs a register. We therefore do not require a register for it, and let
@@ -1757,6 +1979,9 @@ void InstructionCodeGeneratorX86::VisitInvokeVirtual(HInvokeVirtual* invoke) {
}
void LocationsBuilderX86::VisitInvokeInterface(HInvokeInterface* invoke) {
+ // This call to HandleInvoke allocates a temporary (core) register
+ // which is also used to transfer the hidden argument to the FP
+ // register (XMM7).
HandleInvoke(invoke);
// Add the hidden argument.
invoke->GetLocations()->AddTemp(Location::FpuRegisterLocation(XMM7));
@@ -1764,31 +1989,42 @@ void LocationsBuilderX86::VisitInvokeInterface(HInvokeInterface* invoke) {
void InstructionCodeGeneratorX86::VisitInvokeInterface(HInvokeInterface* invoke) {
// TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
- Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
+ LocationSummary* locations = invoke->GetLocations();
+ Register temp = locations->GetTemp(0).AsRegister<Register>();
+ XmmRegister hidden_reg = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
invoke->GetImtIndex() % mirror::Class::kImtSize, kX86PointerSize).Uint32Value();
- LocationSummary* locations = invoke->GetLocations();
Location receiver = locations->InAt(0);
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
- // Set the hidden argument.
+ // Set the hidden argument. It is safe to do this here, as XMM7
+ // won't be modified thereafter, before the `call` instruction.
+ DCHECK_EQ(XMM7, hidden_reg);
__ movl(temp, Immediate(invoke->GetDexMethodIndex()));
- __ movd(invoke->GetLocations()->GetTemp(1).AsFpuRegister<XmmRegister>(), temp);
+ __ movd(hidden_reg, temp);
- // temp = object->GetClass();
if (receiver.IsStackSlot()) {
__ movl(temp, Address(ESP, receiver.GetStackIndex()));
+ // /* HeapReference<Class> */ temp = temp->klass_
__ movl(temp, Address(temp, class_offset));
} else {
+ // /* HeapReference<Class> */ temp = receiver->klass_
__ movl(temp, Address(receiver.AsRegister<Register>(), class_offset));
}
codegen_->MaybeRecordImplicitNullCheck(invoke);
+ // Instead of simply (possibly) unpoisoning `temp` here, we should
+ // emit a read barrier for the previous class reference load.
+ // However, this is not required in practice, as this is an
+ // intermediate/temporary reference and because the current
+ // concurrent copying collector keeps the from-space memory
+ // intact/accessible until the end of the marking phase (though the
+ // concurrent copying collector may not do so in the future).
__ MaybeUnpoisonHeapReference(temp);
// temp = temp->GetImtEntryAt(method_offset);
__ movl(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
- __ call(Address(temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- kX86WordSize).Int32Value()));
+ __ call(Address(temp,
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -3014,7 +3250,7 @@ void InstructionCodeGeneratorX86::GenerateDivRemIntegral(HBinaryOperation* instr
DCHECK_EQ(EAX, first.AsRegister<Register>());
DCHECK_EQ(is_div ? EAX : EDX, out.AsRegister<Register>());
- if (instruction->InputAt(1)->IsIntConstant()) {
+ if (second.IsConstant()) {
int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
if (imm == 0) {
@@ -3779,16 +4015,6 @@ void InstructionCodeGeneratorX86::GenerateMemoryBarrier(MemBarrierKind kind) {
HInvokeStaticOrDirect::DispatchInfo CodeGeneratorX86::GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
MethodReference target_method ATTRIBUTE_UNUSED) {
- if (desired_dispatch_info.method_load_kind ==
- HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative) {
- // TODO: Implement this type. For the moment, we fall back to kDexCacheViaMethod.
- return HInvokeStaticOrDirect::DispatchInfo {
- HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod,
- HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
- 0u,
- 0u
- };
- }
switch (desired_dispatch_info.code_ptr_location) {
case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
@@ -3805,6 +4031,32 @@ HInvokeStaticOrDirect::DispatchInfo CodeGeneratorX86::GetSupportedInvokeStaticOr
}
}
+Register CodeGeneratorX86::GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke,
+ Register temp) {
+ DCHECK_EQ(invoke->InputCount(), invoke->GetNumberOfArguments() + 1u);
+ Location location = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
+ if (!invoke->GetLocations()->Intrinsified()) {
+ return location.AsRegister<Register>();
+ }
+ // For intrinsics we allow any location, so it may be on the stack.
+ if (!location.IsRegister()) {
+ __ movl(temp, Address(ESP, location.GetStackIndex()));
+ return temp;
+ }
+ // For register locations, check if the register was saved. If so, get it from the stack.
+ // Note: There is a chance that the register was saved but not overwritten, so we could
+ // save one load. However, since this is just an intrinsic slow path, we prefer this
+ // simple and more robust approach rather than trying to determine if that is the case.
+ SlowPathCode* slow_path = GetCurrentSlowPath();
+ DCHECK(slow_path != nullptr); // For intrinsified invokes the call is emitted on the slow path.
+ if (slow_path->IsCoreRegisterSaved(location.AsRegister<Register>())) {
+ int stack_offset = slow_path->GetStackOffsetOfCoreRegister(location.AsRegister<Register>());
+ __ movl(temp, Address(ESP, stack_offset));
+ return temp;
+ }
+ return location.AsRegister<Register>();
+}
+
void CodeGeneratorX86::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
switch (invoke->GetMethodLoadKind()) {
@@ -3823,11 +4075,16 @@ void CodeGeneratorX86::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
method_patches_.emplace_back(invoke->GetTargetMethod());
__ Bind(&method_patches_.back().label); // Bind the label at the end of the "movl" insn.
break;
- case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
- // TODO: Implement this type.
- // Currently filtered out by GetSupportedInvokeStaticOrDirectDispatch().
- LOG(FATAL) << "Unsupported";
- UNREACHABLE();
+ case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative: {
+ Register base_reg = GetInvokeStaticOrDirectExtraParameter(invoke,
+ temp.AsRegister<Register>());
+ uint32_t offset = invoke->GetDexCacheArrayOffset();
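+ // The displacement is a placeholder (kDummy32BitOffset); it is fixed up at link time
+ // through the dex cache array patch recorded below.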
+ __ movl(temp.AsRegister<Register>(), Address(base_reg, kDummy32BitOffset));
+ // Add the patch entry and bind its label at the end of the instruction.
+ pc_relative_dex_cache_patches_.emplace_back(*invoke->GetTargetMethod().dex_file, offset);
+ __ Bind(&pc_relative_dex_cache_patches_.back().label);
+ break;
+ }
case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
Location current_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
Register method_reg;
@@ -3840,7 +4097,7 @@ void CodeGeneratorX86::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
method_reg = reg;
__ movl(reg, Address(ESP, kCurrentMethodStackOffset));
}
- // temp = temp->dex_cache_resolved_methods_;
+ // /* ArtMethod*[] */ temp = temp.ptr_sized_fields_->dex_cache_resolved_methods_;
__ movl(reg, Address(method_reg,
ArtMethod::DexCacheResolvedMethodsOffset(kX86PointerSize).Int32Value()));
// temp = temp[index_in_cache]
@@ -3884,10 +4141,17 @@ void CodeGeneratorX86::GenerateVirtualCall(HInvokeVirtual* invoke, Location temp
LocationSummary* locations = invoke->GetLocations();
Location receiver = locations->InAt(0);
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
- // temp = object->GetClass();
DCHECK(receiver.IsRegister());
+ // /* HeapReference<Class> */ temp = receiver->klass_
__ movl(temp, Address(receiver.AsRegister<Register>(), class_offset));
MaybeRecordImplicitNullCheck(invoke);
+ // Instead of simply (possibly) unpoisoning `temp` here, we should
+ // emit a read barrier for the previous class reference load.
+ // However, this is not required in practice, as this is an
+ // intermediate/temporary reference and because the current
+ // concurrent copying collector keeps the from-space memory
+ // intact/accessible until the end of the marking phase (though the
+ // concurrent copying collector may not do so in the future).
__ MaybeUnpoisonHeapReference(temp);
// temp = temp->GetMethodAt(method_offset);
__ movl(temp, Address(temp, method_offset));
@@ -3898,23 +4162,33 @@ void CodeGeneratorX86::GenerateVirtualCall(HInvokeVirtual* invoke, Location temp
void CodeGeneratorX86::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) {
DCHECK(linker_patches->empty());
- linker_patches->reserve(method_patches_.size() + relative_call_patches_.size());
+ size_t size =
+ method_patches_.size() +
+ relative_call_patches_.size() +
+ pc_relative_dex_cache_patches_.size();
+ linker_patches->reserve(size);
+ // The label points to the end of the "movl" insn but the literal offset for method
+ // patch needs to point to the embedded constant which occupies the last 4 bytes.
+ constexpr uint32_t kLabelPositionToLiteralOffsetAdjustment = 4u;
for (const MethodPatchInfo<Label>& info : method_patches_) {
- // The label points to the end of the "movl" insn but the literal offset for method
- // patch x86 needs to point to the embedded constant which occupies the last 4 bytes.
- uint32_t literal_offset = info.label.Position() - 4;
+ uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
linker_patches->push_back(LinkerPatch::MethodPatch(literal_offset,
info.target_method.dex_file,
info.target_method.dex_method_index));
}
for (const MethodPatchInfo<Label>& info : relative_call_patches_) {
- // The label points to the end of the "call" insn but the literal offset for method
- // patch x86 needs to point to the embedded constant which occupies the last 4 bytes.
- uint32_t literal_offset = info.label.Position() - 4;
+ uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
linker_patches->push_back(LinkerPatch::RelativeCodePatch(literal_offset,
info.target_method.dex_file,
info.target_method.dex_method_index));
}
+ for (const PcRelativeDexCacheAccessInfo& info : pc_relative_dex_cache_patches_) {
+ uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
+ linker_patches->push_back(LinkerPatch::DexCacheArrayPatch(literal_offset,
+ &info.target_dex_file,
+ GetMethodAddressOffset(),
+ info.element_offset));
+ }
}
void CodeGeneratorX86::MarkGCCard(Register temp,
@@ -3939,18 +4213,29 @@ void CodeGeneratorX86::MarkGCCard(Register temp,
void LocationsBuilderX86::HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info) {
DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
+
+ bool object_field_get_with_read_barrier =
+ kEmitCompilerReadBarrier && (instruction->GetType() == Primitive::kPrimNot);
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetArena()) LocationSummary(instruction,
+ kEmitCompilerReadBarrier ?
+ LocationSummary::kCallOnSlowPath :
+ LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
if (Primitive::IsFloatingPointType(instruction->GetType())) {
locations->SetOut(Location::RequiresFpuRegister());
} else {
- // The output overlaps in case of long: we don't want the low move to overwrite
- // the object's location.
- locations->SetOut(Location::RequiresRegister(),
- (instruction->GetType() == Primitive::kPrimLong) ? Location::kOutputOverlap
- : Location::kNoOutputOverlap);
+ // The output overlaps in case of long: we don't want the low move
+ // to overwrite the object's location. Likewise, in the case of
+ // an object field get with read barriers enabled, we do not want
+ // the move to overwrite the object's location, as we need it to emit
+ // the read barrier.
+ locations->SetOut(
+ Location::RequiresRegister(),
+ (object_field_get_with_read_barrier || instruction->GetType() == Primitive::kPrimLong) ?
+ Location::kOutputOverlap :
+ Location::kNoOutputOverlap);
}
if (field_info.IsVolatile() && (field_info.GetFieldType() == Primitive::kPrimLong)) {
@@ -3966,7 +4251,8 @@ void InstructionCodeGeneratorX86::HandleFieldGet(HInstruction* instruction,
DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
LocationSummary* locations = instruction->GetLocations();
- Register base = locations->InAt(0).AsRegister<Register>();
+ Location base_loc = locations->InAt(0);
+ Register base = base_loc.AsRegister<Register>();
Location out = locations->Out();
bool is_volatile = field_info.IsVolatile();
Primitive::Type field_type = field_info.GetFieldType();
@@ -4041,7 +4327,7 @@ void InstructionCodeGeneratorX86::HandleFieldGet(HInstruction* instruction,
}
if (field_type == Primitive::kPrimNot) {
- __ MaybeUnpoisonHeapReference(out.AsRegister<Register>());
+ codegen_->MaybeGenerateReadBarrier(instruction, out, out, base_loc, offset);
}
}
@@ -4369,24 +4655,35 @@ void InstructionCodeGeneratorX86::VisitNullCheck(HNullCheck* instruction) {
}
void LocationsBuilderX86::VisitArrayGet(HArrayGet* instruction) {
+ bool object_array_get_with_read_barrier =
+ kEmitCompilerReadBarrier && (instruction->GetType() == Primitive::kPrimNot);
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetArena()) LocationSummary(instruction,
+ object_array_get_with_read_barrier ?
+ LocationSummary::kCallOnSlowPath :
+ LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
if (Primitive::IsFloatingPointType(instruction->GetType())) {
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
} else {
- // The output overlaps in case of long: we don't want the low move to overwrite
- // the array's location.
- locations->SetOut(Location::RequiresRegister(),
- (instruction->GetType() == Primitive::kPrimLong) ? Location::kOutputOverlap
- : Location::kNoOutputOverlap);
+ // The output overlaps in case of long: we don't want the low move
+ // to overwrite the array's location. Likewise, in the case of an
+ // object array get with read barriers enabled, we do not want the
+ // move to overwrite the array's location, as we need it to emit
+ // the read barrier.
+ locations->SetOut(
+ Location::RequiresRegister(),
+ (instruction->GetType() == Primitive::kPrimLong || object_array_get_with_read_barrier) ?
+ Location::kOutputOverlap :
+ Location::kNoOutputOverlap);
}
}
void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- Register obj = locations->InAt(0).AsRegister<Register>();
+ Location obj_loc = locations->InAt(0);
+ Register obj = obj_loc.AsRegister<Register>();
Location index = locations->InAt(1);
Primitive::Type type = instruction->GetType();
@@ -4441,6 +4738,9 @@ void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) {
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
+ static_assert(
+ sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
+ "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
Register out = locations->Out().AsRegister<Register>();
if (index.IsConstant()) {
@@ -4505,8 +4805,17 @@ void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) {
}
if (type == Primitive::kPrimNot) {
- Register out = locations->Out().AsRegister<Register>();
- __ MaybeUnpoisonHeapReference(out);
+ static_assert(
+ sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
+ "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+ Location out = locations->Out();
+ if (index.IsConstant()) {
+ uint32_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+ codegen_->MaybeGenerateReadBarrier(instruction, out, out, obj_loc, offset);
+ } else {
+ codegen_->MaybeGenerateReadBarrier(instruction, out, out, obj_loc, data_offset, index);
+ }
}
}
@@ -4517,14 +4826,18 @@ void LocationsBuilderX86::VisitArraySet(HArraySet* instruction) {
// optimization.
Primitive::Type value_type = instruction->GetComponentType();
+
bool needs_write_barrier =
CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
-
- bool may_need_runtime_call = instruction->NeedsTypeCheck();
+ bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
+ bool object_array_set_with_read_barrier =
+ kEmitCompilerReadBarrier && (value_type == Primitive::kPrimNot);
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
instruction,
- may_need_runtime_call ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall);
+ (may_need_runtime_call_for_type_check || object_array_set_with_read_barrier) ?
+ LocationSummary::kCallOnSlowPath :
+ LocationSummary::kNoCall);
bool is_byte_type = (value_type == Primitive::kPrimBoolean)
|| (value_type == Primitive::kPrimByte);
@@ -4545,20 +4858,21 @@ void LocationsBuilderX86::VisitArraySet(HArraySet* instruction) {
// Temporary registers for the write barrier.
locations->AddTemp(Location::RequiresRegister()); // Possibly used for ref. poisoning too.
// Ensure the card is in a byte register.
- locations->AddTemp(Location::RegisterLocation(ECX));
+ locations->AddTemp(Location::RegisterLocation(ECX)); // Possibly used for read barrier too.
}
}
void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- Register array = locations->InAt(0).AsRegister<Register>();
+ Location array_loc = locations->InAt(0);
+ Register array = array_loc.AsRegister<Register>();
Location index = locations->InAt(1);
Location value = locations->InAt(2);
Primitive::Type value_type = instruction->GetComponentType();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
- bool may_need_runtime_call = locations->CanCall();
+ bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
bool needs_write_barrier =
CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
@@ -4598,6 +4912,7 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
Address address = index.IsConstant()
? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + offset)
: Address(array, index.AsRegister<Register>(), TIMES_4, offset);
+
if (!value.IsRegister()) {
// Just setting null.
DCHECK(instruction->InputAt(2)->IsNullConstant());
@@ -4605,7 +4920,7 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
__ movl(address, Immediate(0));
codegen_->MaybeRecordImplicitNullCheck(instruction);
DCHECK(!needs_write_barrier);
- DCHECK(!may_need_runtime_call);
+ DCHECK(!may_need_runtime_call_for_type_check);
break;
}
@@ -4614,7 +4929,7 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
NearLabel done, not_null, do_put;
SlowPathCode* slow_path = nullptr;
Register temp = locations->GetTemp(0).AsRegister<Register>();
- if (may_need_runtime_call) {
+ if (may_need_runtime_call_for_type_check) {
slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathX86(instruction);
codegen_->AddSlowPath(slow_path);
if (instruction->GetValueCanBeNull()) {
@@ -4626,22 +4941,62 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
__ Bind(&not_null);
}
- __ movl(temp, Address(array, class_offset));
- codegen_->MaybeRecordImplicitNullCheck(instruction);
- __ MaybeUnpoisonHeapReference(temp);
- __ movl(temp, Address(temp, component_offset));
- // No need to poison/unpoison, we're comparing two poisoned references.
- __ cmpl(temp, Address(register_value, class_offset));
- if (instruction->StaticTypeOfArrayIsObjectArray()) {
- __ j(kEqual, &do_put);
- __ MaybeUnpoisonHeapReference(temp);
- __ movl(temp, Address(temp, super_offset));
- // No need to unpoison, we're comparing against null..
- __ testl(temp, temp);
- __ j(kNotEqual, slow_path->GetEntryLabel());
- __ Bind(&do_put);
+ if (kEmitCompilerReadBarrier) {
+ // When read barriers are enabled, the type checking
+ // instrumentation requires two read barriers:
+ //
+ // __ movl(temp2, temp);
+ // // /* HeapReference<Class> */ temp = temp->component_type_
+ // __ movl(temp, Address(temp, component_offset));
+ // codegen_->GenerateReadBarrier(
+ // instruction, temp_loc, temp_loc, temp2_loc, component_offset);
+ //
+ // // /* HeapReference<Class> */ temp2 = register_value->klass_
+ // __ movl(temp2, Address(register_value, class_offset));
+ // codegen_->GenerateReadBarrier(
+ // instruction, temp2_loc, temp2_loc, value, class_offset, temp_loc);
+ //
+ // __ cmpl(temp, temp2);
+ //
+ // However, the second read barrier may trash `temp`, as it
+ // is a temporary register, and as such would not be saved
+ // along with live registers before calling the runtime (nor
+ // restored afterwards). So in this case, we bail out and
+ // delegate the work to the array set slow path.
+ //
+ // TODO: Extend the register allocator to support a new
+ // "(locally) live temp" location so as to avoid always
+ // going into the slow path when read barriers are enabled.
+ __ jmp(slow_path->GetEntryLabel());
} else {
- __ j(kNotEqual, slow_path->GetEntryLabel());
+ // /* HeapReference<Class> */ temp = array->klass_
+ __ movl(temp, Address(array, class_offset));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ __ MaybeUnpoisonHeapReference(temp);
+
+ // /* HeapReference<Class> */ temp = temp->component_type_
+ __ movl(temp, Address(temp, component_offset));
+ // If heap poisoning is enabled, no need to unpoison `temp`
+ // nor the object reference in `register_value->klass`, as
+ // we are comparing two poisoned references.
+ __ cmpl(temp, Address(register_value, class_offset));
+
+ if (instruction->StaticTypeOfArrayIsObjectArray()) {
+ __ j(kEqual, &do_put);
+ // If heap poisoning is enabled, the `temp` reference has
+ // not been unpoisoned yet; unpoison it now.
+ __ MaybeUnpoisonHeapReference(temp);
+
+ // /* HeapReference<Class> */ temp = temp->super_class_
+ __ movl(temp, Address(temp, super_offset));
+ // If heap poisoning is enabled, no need to unpoison
+ // `temp`, as we are comparing against null below.
+ __ testl(temp, temp);
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ Bind(&do_put);
+ } else {
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ }
}
}
@@ -4652,7 +5007,7 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
} else {
__ movl(address, register_value);
}
- if (!may_need_runtime_call) {
+ if (!may_need_runtime_call_for_type_check) {
codegen_->MaybeRecordImplicitNullCheck(instruction);
}
@@ -4667,6 +5022,7 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
break;
}
+
case Primitive::kPrimInt: {
uint32_t offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
Address address = index.IsConstant()
@@ -5137,7 +5493,8 @@ void LocationsBuilderX86::VisitLoadClass(HLoadClass* cls) {
CodeGenerator::CreateLoadClassLocationSummary(
cls,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
- Location::RegisterLocation(EAX));
+ Location::RegisterLocation(EAX),
+ /* code_generator_supports_read_barrier */ true);
}
void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) {
@@ -5151,18 +5508,40 @@ void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) {
return;
}
- Register out = locations->Out().AsRegister<Register>();
+ Location out_loc = locations->Out();
+ Register out = out_loc.AsRegister<Register>();
Register current_method = locations->InAt(0).AsRegister<Register>();
+
if (cls->IsReferrersClass()) {
DCHECK(!cls->CanCallRuntime());
DCHECK(!cls->MustGenerateClinitCheck());
- __ movl(out, Address(current_method, ArtMethod::DeclaringClassOffset().Int32Value()));
+ uint32_t declaring_class_offset = ArtMethod::DeclaringClassOffset().Int32Value();
+ if (kEmitCompilerReadBarrier) {
+ // /* GcRoot<mirror::Class>* */ out = &(current_method->declaring_class_)
+ __ leal(out, Address(current_method, declaring_class_offset));
+ // /* mirror::Class* */ out = out->Read()
+ codegen_->GenerateReadBarrierForRoot(cls, out_loc, out_loc);
+ } else {
+ // /* GcRoot<mirror::Class> */ out = current_method->declaring_class_
+ __ movl(out, Address(current_method, declaring_class_offset));
+ }
} else {
DCHECK(cls->CanCallRuntime());
- __ movl(out, Address(
- current_method, ArtMethod::DexCacheResolvedTypesOffset(kX86PointerSize).Int32Value()));
- __ movl(out, Address(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));
- // TODO: We will need a read barrier here.
+ // /* GcRoot<mirror::Class>[] */ out =
+ // current_method.ptr_sized_fields_->dex_cache_resolved_types_
+ __ movl(out, Address(current_method,
+ ArtMethod::DexCacheResolvedTypesOffset(kX86PointerSize).Int32Value()));
+
+ size_t cache_offset = CodeGenerator::GetCacheOffset(cls->GetTypeIndex());
+ if (kEmitCompilerReadBarrier) {
+ // /* GcRoot<mirror::Class>* */ out = &out[type_index]
+ __ leal(out, Address(out, cache_offset));
+ // /* mirror::Class* */ out = out->Read()
+ codegen_->GenerateReadBarrierForRoot(cls, out_loc, out_loc);
+ } else {
+ // /* GcRoot<mirror::Class> */ out = out[type_index]
+ __ movl(out, Address(out, cache_offset));
+ }
SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86(
cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
@@ -5216,12 +5595,35 @@ void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) {
codegen_->AddSlowPath(slow_path);
LocationSummary* locations = load->GetLocations();
- Register out = locations->Out().AsRegister<Register>();
+ Location out_loc = locations->Out();
+ Register out = out_loc.AsRegister<Register>();
Register current_method = locations->InAt(0).AsRegister<Register>();
- __ movl(out, Address(current_method, ArtMethod::DeclaringClassOffset().Int32Value()));
+
+ uint32_t declaring_class_offset = ArtMethod::DeclaringClassOffset().Int32Value();
+ if (kEmitCompilerReadBarrier) {
+ // /* GcRoot<mirror::Class>* */ out = &(current_method->declaring_class_)
+ __ leal(out, Address(current_method, declaring_class_offset));
+ // /* mirror::Class* */ out = out->Read()
+ codegen_->GenerateReadBarrierForRoot(load, out_loc, out_loc);
+ } else {
+ // /* GcRoot<mirror::Class> */ out = current_method->declaring_class_
+ __ movl(out, Address(current_method, declaring_class_offset));
+ }
+
+ // /* GcRoot<mirror::String>[] */ out = out->dex_cache_strings_
__ movl(out, Address(out, mirror::Class::DexCacheStringsOffset().Int32Value()));
- __ movl(out, Address(out, CodeGenerator::GetCacheOffset(load->GetStringIndex())));
- // TODO: We will need a read barrier here.
+
+ size_t cache_offset = CodeGenerator::GetCacheOffset(load->GetStringIndex());
+ if (kEmitCompilerReadBarrier) {
+ // /* GcRoot<mirror::String>* */ out = &out[string_index]
+ __ leal(out, Address(out, cache_offset));
+ // /* mirror::String* */ out = out->Read()
+ codegen_->GenerateReadBarrierForRoot(load, out_loc, out_loc);
+ } else {
+ // /* GcRoot<mirror::String> */ out = out[string_index]
+ __ movl(out, Address(out, cache_offset));
+ }
+
__ testl(out, out);
__ j(kEqual, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
@@ -5265,40 +5667,44 @@ void InstructionCodeGeneratorX86::VisitThrow(HThrow* instruction) {
void LocationsBuilderX86::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
- switch (instruction->GetTypeCheckKind()) {
+ TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
+ switch (type_check_kind) {
case TypeCheckKind::kExactCheck:
case TypeCheckKind::kAbstractClassCheck:
case TypeCheckKind::kClassHierarchyCheck:
case TypeCheckKind::kArrayObjectCheck:
- call_kind = LocationSummary::kNoCall;
+ call_kind =
+ kEmitCompilerReadBarrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall;
break;
+ case TypeCheckKind::kArrayCheck:
case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
- call_kind = LocationSummary::kCall;
- break;
- case TypeCheckKind::kArrayCheck:
call_kind = LocationSummary::kCallOnSlowPath;
break;
}
+
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
- if (call_kind != LocationSummary::kCall) {
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::Any());
- // Note that TypeCheckSlowPathX86 uses this register too.
- locations->SetOut(Location::RequiresRegister());
- } else {
- InvokeRuntimeCallingConvention calling_convention;
- locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
- locations->SetOut(Location::RegisterLocation(EAX));
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::Any());
+ // Note that TypeCheckSlowPathX86 uses this "out" register too.
+ locations->SetOut(Location::RequiresRegister());
+ // When read barriers are enabled, we need a temporary register for
+ // some cases.
+ if (kEmitCompilerReadBarrier &&
+ (type_check_kind == TypeCheckKind::kAbstractClassCheck ||
+ type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
+ type_check_kind == TypeCheckKind::kArrayObjectCheck)) {
+ locations->AddTemp(Location::RequiresRegister());
}
}
void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary* locations = instruction->GetLocations();
- Register obj = locations->InAt(0).AsRegister<Register>();
+ Location obj_loc = locations->InAt(0);
+ Register obj = obj_loc.AsRegister<Register>();
Location cls = locations->InAt(1);
- Register out = locations->Out().AsRegister<Register>();
+ Location out_loc = locations->Out();
+ Register out = out_loc.AsRegister<Register>();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
@@ -5313,15 +5719,9 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
__ j(kEqual, &zero);
}
- // In case of an interface/unresolved check, we put the object class into the object register.
- // This is safe, as the register is caller-save, and the object must be in another
- // register if it survives the runtime call.
- Register target = (instruction->GetTypeCheckKind() == TypeCheckKind::kInterfaceCheck) ||
- (instruction->GetTypeCheckKind() == TypeCheckKind::kUnresolvedCheck)
- ? obj
- : out;
- __ movl(target, Address(obj, class_offset));
- __ MaybeUnpoisonHeapReference(target);
+ // /* HeapReference<Class> */ out = obj->klass_
+ __ movl(out, Address(obj, class_offset));
+ codegen_->MaybeGenerateReadBarrier(instruction, out_loc, out_loc, obj_loc, class_offset);
switch (instruction->GetTypeCheckKind()) {
case TypeCheckKind::kExactCheck: {
@@ -5338,13 +5738,23 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
__ jmp(&done);
break;
}
+
case TypeCheckKind::kAbstractClassCheck: {
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
NearLabel loop;
__ Bind(&loop);
+ Location temp_loc = kEmitCompilerReadBarrier ? locations->GetTemp(0) : Location::NoLocation();
+ if (kEmitCompilerReadBarrier) {
+ // Save the value of `out` into `temp` before overwriting it
+ // in the following move operation, as we will need it for the
+ // read barrier below.
+ Register temp = temp_loc.AsRegister<Register>();
+ __ movl(temp, out);
+ }
+ // /* HeapReference<Class> */ out = out->super_class_
__ movl(out, Address(out, super_offset));
- __ MaybeUnpoisonHeapReference(out);
+ codegen_->MaybeGenerateReadBarrier(instruction, out_loc, out_loc, temp_loc, super_offset);
__ testl(out, out);
// If `out` is null, we use it for the result, and jump to `done`.
__ j(kEqual, &done);
@@ -5361,6 +5771,7 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
}
break;
}
+
case TypeCheckKind::kClassHierarchyCheck: {
// Walk over the class hierarchy to find a match.
NearLabel loop, success;
@@ -5372,8 +5783,17 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
__ cmpl(out, Address(ESP, cls.GetStackIndex()));
}
__ j(kEqual, &success);
+ Location temp_loc = kEmitCompilerReadBarrier ? locations->GetTemp(0) : Location::NoLocation();
+ if (kEmitCompilerReadBarrier) {
+ // Save the value of `out` into `temp` before overwriting it
+ // in the following move operation, as we will need it for the
+ // read barrier below.
+ Register temp = temp_loc.AsRegister<Register>();
+ __ movl(temp, out);
+ }
+ // /* HeapReference<Class> */ out = out->super_class_
__ movl(out, Address(out, super_offset));
- __ MaybeUnpoisonHeapReference(out);
+ codegen_->MaybeGenerateReadBarrier(instruction, out_loc, out_loc, temp_loc, super_offset);
__ testl(out, out);
__ j(kNotEqual, &loop);
// If `out` is null, we use it for the result, and jump to `done`.
@@ -5385,6 +5805,7 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
}
break;
}
+
case TypeCheckKind::kArrayObjectCheck: {
// Do an exact check.
NearLabel exact_check;
@@ -5395,9 +5816,18 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
__ cmpl(out, Address(ESP, cls.GetStackIndex()));
}
__ j(kEqual, &exact_check);
- // Otherwise, we need to check that the object's class is a non primitive array.
+ // Otherwise, we need to check that the object's class is a non-primitive array.
+ Location temp_loc = kEmitCompilerReadBarrier ? locations->GetTemp(0) : Location::NoLocation();
+ if (kEmitCompilerReadBarrier) {
+ // Save the value of `out` into `temp` before overwriting it
+ // in the following move operation, as we will need it for the
+ // read barrier below.
+ Register temp = temp_loc.AsRegister<Register>();
+ __ movl(temp, out);
+ }
+ // /* HeapReference<Class> */ out = out->component_type_
__ movl(out, Address(out, component_offset));
- __ MaybeUnpoisonHeapReference(out);
+ codegen_->MaybeGenerateReadBarrier(instruction, out_loc, out_loc, temp_loc, component_offset);
__ testl(out, out);
// If `out` is null, we use it for the result, and jump to `done`.
__ j(kEqual, &done);
@@ -5408,6 +5838,7 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
__ jmp(&done);
break;
}
+
case TypeCheckKind::kArrayCheck: {
if (cls.IsRegister()) {
__ cmpl(out, cls.AsRegister<Register>());
@@ -5416,8 +5847,8 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
__ cmpl(out, Address(ESP, cls.GetStackIndex()));
}
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86(
- instruction, /* is_fatal */ false);
+ slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86(instruction,
+ /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ j(kNotEqual, slow_path->GetEntryLabel());
__ movl(out, Immediate(1));
@@ -5426,13 +5857,25 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
}
break;
}
+
case TypeCheckKind::kUnresolvedCheck:
- case TypeCheckKind::kInterfaceCheck:
- default: {
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
- instruction,
- instruction->GetDexPc(),
- nullptr);
+ case TypeCheckKind::kInterfaceCheck: {
+ // Note that we indeed only call on slow path, but we always go
+ // into the slow path for the unresolved & interface check
+ // cases.
+ //
+ // We cannot directly call the InstanceofNonTrivial runtime
+ // entry point without resorting to a type checking slow path
+ // here (i.e. by calling InvokeRuntime directly), as it would
+ // require assigning fixed registers for the inputs of this
+ // HInstanceOf instruction (following the runtime calling
+ // convention), which might be cluttered by the potential first
+ // read barrier emission at the beginning of this method.
+ DCHECK(locations->OnlyCallsOnSlowPath());
+ slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86(instruction,
+ /* is_fatal */ false);
+ codegen_->AddSlowPath(slow_path);
+ __ jmp(slow_path->GetEntryLabel());
if (zero.IsLinked()) {
__ jmp(&done);
}
@@ -5457,75 +5900,73 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
void LocationsBuilderX86::VisitCheckCast(HCheckCast* instruction) {
LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
bool throws_into_catch = instruction->CanThrowIntoCatchBlock();
-
- switch (instruction->GetTypeCheckKind()) {
+ TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
+ switch (type_check_kind) {
case TypeCheckKind::kExactCheck:
case TypeCheckKind::kAbstractClassCheck:
case TypeCheckKind::kClassHierarchyCheck:
case TypeCheckKind::kArrayObjectCheck:
- call_kind = throws_into_catch
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- break;
- case TypeCheckKind::kInterfaceCheck:
- case TypeCheckKind::kUnresolvedCheck:
- call_kind = LocationSummary::kCall;
+ call_kind = (throws_into_catch || kEmitCompilerReadBarrier) ?
+ LocationSummary::kCallOnSlowPath :
+ LocationSummary::kNoCall; // In fact, call on a fatal (non-returning) slow path.
break;
case TypeCheckKind::kArrayCheck:
+ case TypeCheckKind::kUnresolvedCheck:
+ case TypeCheckKind::kInterfaceCheck:
call_kind = LocationSummary::kCallOnSlowPath;
break;
}
-
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
- instruction, call_kind);
- if (call_kind != LocationSummary::kCall) {
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::Any());
- // Note that TypeCheckSlowPathX86 uses this register too.
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::Any());
+ // Note that TypeCheckSlowPathX86 uses this "temp" register too.
+ locations->AddTemp(Location::RequiresRegister());
+ // When read barriers are enabled, we need an additional temporary
+ // register for some cases.
+ if (kEmitCompilerReadBarrier &&
+ (type_check_kind == TypeCheckKind::kAbstractClassCheck ||
+ type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
+ type_check_kind == TypeCheckKind::kArrayObjectCheck)) {
locations->AddTemp(Location::RequiresRegister());
- } else {
- InvokeRuntimeCallingConvention calling_convention;
- locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
}
}
void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) {
LocationSummary* locations = instruction->GetLocations();
- Register obj = locations->InAt(0).AsRegister<Register>();
+ Location obj_loc = locations->InAt(0);
+ Register obj = obj_loc.AsRegister<Register>();
Location cls = locations->InAt(1);
- Register temp = locations->WillCall()
- ? kNoRegister
- : locations->GetTemp(0).AsRegister<Register>();
-
+ Location temp_loc = locations->GetTemp(0);
+ Register temp = temp_loc.AsRegister<Register>();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
- SlowPathCode* slow_path = nullptr;
- if (!locations->WillCall()) {
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86(
- instruction, !locations->CanCall());
- codegen_->AddSlowPath(slow_path);
- }
+ TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
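+ // The type check slow path is fatal (it never returns) for the simple check kinds when
+ // the instruction cannot throw into a catch block; for the array, unresolved and
+ // interface checks the runtime call may succeed and return.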
+ bool is_type_check_slow_path_fatal =
+ (type_check_kind == TypeCheckKind::kExactCheck ||
+ type_check_kind == TypeCheckKind::kAbstractClassCheck ||
+ type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
+ type_check_kind == TypeCheckKind::kArrayObjectCheck) &&
+ !instruction->CanThrowIntoCatchBlock();
+ SlowPathCode* type_check_slow_path =
+ new (GetGraph()->GetArena()) TypeCheckSlowPathX86(instruction,
+ is_type_check_slow_path_fatal);
+ codegen_->AddSlowPath(type_check_slow_path);
- NearLabel done, abstract_entry;
+ NearLabel done;
// Avoid null check if we know obj is not null.
if (instruction->MustDoNullCheck()) {
__ testl(obj, obj);
__ j(kEqual, &done);
}
- if (locations->WillCall()) {
- __ movl(obj, Address(obj, class_offset));
- __ MaybeUnpoisonHeapReference(obj);
- } else {
- __ movl(temp, Address(obj, class_offset));
- __ MaybeUnpoisonHeapReference(temp);
- }
+ // /* HeapReference<Class> */ temp = obj->klass_
+ __ movl(temp, Address(obj, class_offset));
+ codegen_->MaybeGenerateReadBarrier(instruction, temp_loc, temp_loc, obj_loc, class_offset);
- switch (instruction->GetTypeCheckKind()) {
+ switch (type_check_kind) {
case TypeCheckKind::kExactCheck:
case TypeCheckKind::kArrayCheck: {
if (cls.IsRegister()) {
@@ -5536,19 +5977,44 @@ void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) {
}
// Jump to slow path for throwing the exception or doing a
// more involved array check.
- __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ j(kNotEqual, type_check_slow_path->GetEntryLabel());
break;
}
+
case TypeCheckKind::kAbstractClassCheck: {
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
- NearLabel loop, success;
+ NearLabel loop, compare_classes;
__ Bind(&loop);
+ Location temp2_loc =
+ kEmitCompilerReadBarrier ? locations->GetTemp(1) : Location::NoLocation();
+ if (kEmitCompilerReadBarrier) {
+ // Save the value of `temp` into `temp2` before overwriting it
+ // in the following move operation, as we will need it for the
+ // read barrier below.
+ Register temp2 = temp2_loc.AsRegister<Register>();
+ __ movl(temp2, temp);
+ }
+ // /* HeapReference<Class> */ temp = temp->super_class_
__ movl(temp, Address(temp, super_offset));
- __ MaybeUnpoisonHeapReference(temp);
+ codegen_->MaybeGenerateReadBarrier(instruction, temp_loc, temp_loc, temp2_loc, super_offset);
+
+ // If the class reference currently in `temp` is not null, jump
+ // to the `compare_classes` label to compare it with the checked
+ // class.
__ testl(temp, temp);
- // Jump to the slow path to throw the exception.
- __ j(kEqual, slow_path->GetEntryLabel());
+ __ j(kNotEqual, &compare_classes);
+ // Otherwise, jump to the slow path to throw the exception.
+ //
+ // But before doing so, move the object's class back into `temp`,
+ // as it has been overwritten in the meantime.
+ // /* HeapReference<Class> */ temp = obj->klass_
+ __ movl(temp, Address(obj, class_offset));
+ codegen_->MaybeGenerateReadBarrier(instruction, temp_loc, temp_loc, obj_loc, class_offset);
+ __ jmp(type_check_slow_path->GetEntryLabel());
+
+ __ Bind(&compare_classes);
if (cls.IsRegister()) {
__ cmpl(temp, cls.AsRegister<Register>());
} else {
@@ -5558,6 +6024,7 @@ void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) {
__ j(kNotEqual, &loop);
break;
}
+
case TypeCheckKind::kClassHierarchyCheck: {
// Walk over the class hierarchy to find a match.
NearLabel loop;
@@ -5569,16 +6036,39 @@ void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) {
__ cmpl(temp, Address(ESP, cls.GetStackIndex()));
}
__ j(kEqual, &done);
+
+ Location temp2_loc =
+ kEmitCompilerReadBarrier ? locations->GetTemp(1) : Location::NoLocation();
+ if (kEmitCompilerReadBarrier) {
+ // Save the value of `temp` into `temp2` before overwriting it
+ // in the following move operation, as we will need it for the
+ // read barrier below.
+ Register temp2 = temp2_loc.AsRegister<Register>();
+ __ movl(temp2, temp);
+ }
+ // /* HeapReference<Class> */ temp = temp->super_class_
__ movl(temp, Address(temp, super_offset));
- __ MaybeUnpoisonHeapReference(temp);
+ codegen_->MaybeGenerateReadBarrier(instruction, temp_loc, temp_loc, temp2_loc, super_offset);
+
+ // If the class reference currently in `temp` is not null, jump
+ // back to the beginning of the loop.
__ testl(temp, temp);
__ j(kNotEqual, &loop);
- // Jump to the slow path to throw the exception.
- __ jmp(slow_path->GetEntryLabel());
+ // Otherwise, jump to the slow path to throw the exception.
+ //
+ // But before doing so, move the object's class back into `temp`,
+ // as it has been overwritten in the meantime.
+ // /* HeapReference<Class> */ temp = obj->klass_
+ __ movl(temp, Address(obj, class_offset));
+ codegen_->MaybeGenerateReadBarrier(instruction, temp_loc, temp_loc, obj_loc, class_offset);
+ __ jmp(type_check_slow_path->GetEntryLabel());
break;
}
+
case TypeCheckKind::kArrayObjectCheck: {
// Do an exact check.
+ NearLabel check_non_primitive_component_type;
if (cls.IsRegister()) {
__ cmpl(temp, cls.AsRegister<Register>());
} else {
@@ -5586,29 +6076,67 @@ void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) {
__ cmpl(temp, Address(ESP, cls.GetStackIndex()));
}
__ j(kEqual, &done);
- // Otherwise, we need to check that the object's class is a non primitive array.
+
+ // Otherwise, we need to check that the object's class is a non-primitive array.
+ Location temp2_loc =
+ kEmitCompilerReadBarrier ? locations->GetTemp(1) : Location::NoLocation();
+ if (kEmitCompilerReadBarrier) {
+ // Save the value of `temp` into `temp2` before overwriting it
+ // in the following move operation, as we will need it for the
+ // read barrier below.
+ Register temp2 = temp2_loc.AsRegister<Register>();
+ __ movl(temp2, temp);
+ }
+ // /* HeapReference<Class> */ temp = temp->component_type_
__ movl(temp, Address(temp, component_offset));
- __ MaybeUnpoisonHeapReference(temp);
+ codegen_->MaybeGenerateReadBarrier(
+ instruction, temp_loc, temp_loc, temp2_loc, component_offset);
+
+ // If the component type is not null (i.e. the object is indeed
+ // an array), jump to label `check_non_primitive_component_type`
+ // to further check that this component type is not a primitive
+ // type.
__ testl(temp, temp);
- __ j(kEqual, slow_path->GetEntryLabel());
+ __ j(kNotEqual, &check_non_primitive_component_type);
+ // Otherwise, jump to the slow path to throw the exception.
+ //
+ // But before doing so, move the object's class back into `temp`,
+ // as it has been overwritten in the meantime.
+ // /* HeapReference<Class> */ temp = obj->klass_
+ __ movl(temp, Address(obj, class_offset));
+ codegen_->MaybeGenerateReadBarrier(instruction, temp_loc, temp_loc, obj_loc, class_offset);
+ __ jmp(type_check_slow_path->GetEntryLabel());
+
+ __ Bind(&check_non_primitive_component_type);
__ cmpw(Address(temp, primitive_offset), Immediate(Primitive::kPrimNot));
- __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ j(kEqual, &done);
+ // Same comment as above regarding `temp` and the slow path.
+ // /* HeapReference<Class> */ temp = obj->klass_
+ __ movl(temp, Address(obj, class_offset));
+ codegen_->MaybeGenerateReadBarrier(instruction, temp_loc, temp_loc, obj_loc, class_offset);
+ __ jmp(type_check_slow_path->GetEntryLabel());
break;
}
+
case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
- default:
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
- instruction,
- instruction->GetDexPc(),
- nullptr);
+ // We always go into the type check slow path for the unresolved &
+ // interface check cases.
+ //
+ // We cannot directly call the CheckCast runtime entry point
+ // without resorting to a type checking slow path here (i.e. by
+ // calling InvokeRuntime directly), as it would require assigning
+ // fixed registers for the inputs of this HCheckCast
+ // instruction (following the runtime calling convention), which
+ // might be cluttered by the potential first read barrier
+ // emission at the beginning of this method.
+ __ jmp(type_check_slow_path->GetEntryLabel());
break;
}
__ Bind(&done);
- if (slow_path != nullptr) {
- __ Bind(slow_path->GetExitLabel());
- }
+ __ Bind(type_check_slow_path->GetExitLabel());
}
void LocationsBuilderX86::VisitMonitorOperation(HMonitorOperation* instruction) {
@@ -5759,6 +6287,82 @@ void InstructionCodeGeneratorX86::HandleBitwiseOperation(HBinaryOperation* instr
}
}
+void CodeGeneratorX86::GenerateReadBarrier(HInstruction* instruction,
+ Location out,
+ Location ref,
+ Location obj,
+ uint32_t offset,
+ Location index) {
+ DCHECK(kEmitCompilerReadBarrier);
+
+ // If heap poisoning is enabled, the unpoisoning of the loaded
+ // reference will be carried out by the runtime within the slow
+ // path.
+ //
+ // Note that `ref` currently does not get unpoisoned (when heap
+ // poisoning is enabled), which is alright as the `ref` argument is
+ // not used by the artReadBarrierSlow entry point.
+ //
+ // TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
+ SlowPathCode* slow_path = new (GetGraph()->GetArena())
+ ReadBarrierForHeapReferenceSlowPathX86(instruction, out, ref, obj, offset, index);
+ AddSlowPath(slow_path);
+
+ // TODO: When read barrier has a fast path, add it here.
+ /* Currently the read barrier call is inserted after the original load.
+ * However, if we have a fast path, we need to perform the load of obj.LockWord *before* the
+ * original load. This load-load ordering is required by the read barrier.
+ * The fast path/slow path (for Baker's algorithm) should look like:
+ *
+ * bool isGray = obj.LockWord & kReadBarrierMask;
+ * lfence; // load fence or artificial data dependence to prevent load-load reordering
+ * ref = obj.field; // this is the original load
+ * if (isGray) {
+ * ref = Mark(ref); // ideally the slow path just does Mark(ref)
+ * }
+ */
+
+ __ jmp(slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+}
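For readers of the TODO above, the Baker-style fast path it describes can be sketched as stand-alone C++ roughly as follows. The Obj layout, kReadBarrierMask value and Mark() stub are assumptions made purely for illustration; they are not ART definitions.

#include <atomic>
#include <cstdint>

namespace read_barrier_sketch {

struct Obj;
Obj* Mark(Obj* ref);  // stand-in for the runtime marking routine (assumed)

// Hypothetical object layout used only for this illustration.
struct Obj {
  uint32_t lock_word;  // stand-in for the ART LockWord
  Obj* field;          // the reference field being read
};

constexpr uint32_t kReadBarrierMask = 0x10000000u;  // assumed gray-bit mask

Obj* LoadWithBakerReadBarrier(Obj* obj) {
  // 1. Test the gray bit before the original load.
  bool is_gray = (obj->lock_word & kReadBarrierMask) != 0u;
  // 2. Load fence (or an artificial data dependence) to keep the two loads
  //    ordered, as the barrier requires.
  std::atomic_thread_fence(std::memory_order_acquire);
  // 3. The original reference load.
  Obj* ref = obj->field;
  // 4. Only gray objects take the Mark() slow path.
  return is_gray ? Mark(ref) : ref;
}

Obj* Mark(Obj* ref) { return ref; }  // trivial stub so the sketch stands alone

}  // namespace read_barrier_sketch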
+
+void CodeGeneratorX86::MaybeGenerateReadBarrier(HInstruction* instruction,
+ Location out,
+ Location ref,
+ Location obj,
+ uint32_t offset,
+ Location index) {
+ if (kEmitCompilerReadBarrier) {
+ // If heap poisoning is enabled, unpoisoning will be taken care of
+ // by the runtime within the slow path.
+ GenerateReadBarrier(instruction, out, ref, obj, offset, index);
+ } else if (kPoisonHeapReferences) {
+ __ UnpoisonHeapReference(out.AsRegister<Register>());
+ }
+}
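The helper above folds three build configurations into one call site. A minimal stand-alone model of that dispatch, with plain booleans standing in for kEmitCompilerReadBarrier and kPoisonHeapReferences, might look like:

enum class RefFixup { kReadBarrier, kUnpoison, kNothing };

// Mirrors the branch in MaybeGenerateReadBarrier: with read barriers the
// slow path takes care of unpoisoning; otherwise the reference is
// unpoisoned inline only when heap poisoning is enabled.
RefFixup ChooseRefFixup(bool emit_read_barrier, bool poison_heap_references) {
  if (emit_read_barrier) {
    return RefFixup::kReadBarrier;
  }
  if (poison_heap_references) {
    return RefFixup::kUnpoison;
  }
  return RefFixup::kNothing;
}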
+
+void CodeGeneratorX86::GenerateReadBarrierForRoot(HInstruction* instruction,
+ Location out,
+ Location root) {
+ DCHECK(kEmitCompilerReadBarrier);
+
+ // Note that GC roots are not affected by heap poisoning, so we do
+ // not need to do anything special for this here.
+ SlowPathCode* slow_path =
+ new (GetGraph()->GetArena()) ReadBarrierForRootSlowPathX86(instruction, out, root);
+ AddSlowPath(slow_path);
+
+ // TODO: Implement a fast path for ReadBarrierForRoot, performing
+ // the following operation (for Baker's algorithm):
+ //
+ // if (thread.tls32_.is_gc_marking) {
+ // root = Mark(root);
+ // }
+
+ __ jmp(slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+}
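The TODO above describes the intended fast path for GC-root reads. A rough stand-alone illustration follows; the Thread/Object model and the Mark() stub are invented for the sketch and do not correspond to the real runtime types.

namespace root_barrier_sketch {

struct Object {};
struct Thread { bool is_gc_marking; };  // stand-in for thread.tls32_.is_gc_marking

Object* Mark(Object* root) { return root; }  // stub for the runtime mark routine

// Only call the mark routine while the concurrent copying GC is marking;
// otherwise return the root unchanged.
Object* ReadRoot(const Thread* self, Object* root) {
  return self->is_gc_marking ? Mark(root) : root;
}

}  // namespace root_barrier_sketch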
+
void LocationsBuilderX86::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
// Nothing to do, this should be removed during prepare for register allocator.
LOG(FATAL) << "Unreachable";
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index ac3d06c23d..cd606f697e 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -397,7 +397,64 @@ class CodeGeneratorX86 : public CodeGenerator {
void Finalize(CodeAllocator* allocator) OVERRIDE;
+ // Generate a read barrier for a heap reference within `instruction`.
+ //
+ // A read barrier for an object reference read from the heap is
+ // implemented as a call to the artReadBarrierSlow runtime entry
+ // point, which is passed the values in locations `ref`, `obj`, and
+ // `offset`:
+ //
+ // mirror::Object* artReadBarrierSlow(mirror::Object* ref,
+ // mirror::Object* obj,
+ // uint32_t offset);
+ //
+ // The `out` location contains the value returned by
+ // artReadBarrierSlow.
+ //
+ // When `index` is provided (i.e. for array accesses), the offset
+ // value passed to artReadBarrierSlow is adjusted to take `index`
+ // into account.
+ void GenerateReadBarrier(HInstruction* instruction,
+ Location out,
+ Location ref,
+ Location obj,
+ uint32_t offset,
+ Location index = Location::NoLocation());
+
+ // If read barriers are enabled, generate a read barrier for a heap reference.
+ // If heap poisoning is enabled, also unpoison the reference in `out`.
+ void MaybeGenerateReadBarrier(HInstruction* instruction,
+ Location out,
+ Location ref,
+ Location obj,
+ uint32_t offset,
+ Location index = Location::NoLocation());
+
+ // Generate a read barrier for a GC root within `instruction`.
+ //
+ // A read barrier for an object reference GC root is implemented as
+ // a call to the artReadBarrierForRootSlow runtime entry point,
+ // which is passed the value in location `root`:
+ //
+ // mirror::Object* artReadBarrierForRootSlow(GcRoot<mirror::Object>* root);
+ //
+ // The `out` location contains the value returned by
+ // artReadBarrierForRootSlow.
+ void GenerateReadBarrierForRoot(HInstruction* instruction, Location out, Location root);
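To make the `index` adjustment mentioned above concrete: for an array access, the offset handed to artReadBarrierSlow ends up being the array data offset plus the index scaled by the 4-byte heap reference size. A self-contained sketch of that arithmetic; the 12-byte data offset in the assertions is assumed only for the example.

#include <cassert>
#include <cstdint>

// Offset passed to the read barrier entry point for element `index` of an
// object array whose data starts at `data_offset`.
uint32_t AdjustedReadBarrierOffset(uint32_t data_offset, uint32_t index) {
  return data_offset + (index << 2);  // << 2 is the TIMES_4 scale: 4-byte references
}

int main() {
  assert(AdjustedReadBarrierOffset(12u, 0u) == 12u);
  assert(AdjustedReadBarrierOffset(12u, 3u) == 24u);
  return 0;
}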
+
private:
+ Register GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke, Register temp);
+
+ struct PcRelativeDexCacheAccessInfo {
+ PcRelativeDexCacheAccessInfo(const DexFile& dex_file, uint32_t element_off)
+ : target_dex_file(dex_file), element_offset(element_off), label() { }
+
+ const DexFile& target_dex_file;
+ uint32_t element_offset;
+ // NOTE: Label is bound to the end of the instruction that has an embedded 32-bit offset.
+ Label label;
+ };
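Since the label in PcRelativeDexCacheAccessInfo is bound at the end of an instruction whose last four bytes are the embedded 32-bit value, the literal to patch starts four bytes before the label position. A tiny worked example; the byte positions are made up.

#include <cassert>
#include <cstdint>

uint32_t LiteralOffset(uint32_t label_position) {
  constexpr uint32_t kAdjustment = 4u;  // size of the embedded 32-bit offset
  return label_position - kAdjustment;
}

int main() {
  // An instruction ending at byte 38 embeds its 32-bit offset at bytes 34..37.
  assert(LiteralOffset(38u) == 34u);
  return 0;
}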
+
// Labels for each block that will be compiled.
Label* block_labels_; // Indexed by block id.
Label frame_entry_label_;
@@ -410,6 +467,8 @@ class CodeGeneratorX86 : public CodeGenerator {
// Method patch info. Using ArenaDeque<> which retains element addresses on push/emplace_back().
ArenaDeque<MethodPatchInfo<Label>> method_patches_;
ArenaDeque<MethodPatchInfo<Label>> relative_call_patches_;
+ // PC-relative DexCache access info.
+ ArenaDeque<PcRelativeDexCacheAccessInfo> pc_relative_dex_cache_patches_;
// Offset to the start of the constant area in the assembled code.
// Used for fixups to the constant area.
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index ee8a299c5e..d55c084618 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -34,6 +34,9 @@
namespace art {
+template<class MirrorType>
+class GcRoot;
+
namespace x86_64 {
static constexpr int kCurrentMethodStackOffset = 0;
@@ -52,16 +55,16 @@ class NullCheckSlowPathX86_64 : public SlowPathCode {
explicit NullCheckSlowPathX86_64(HNullCheck* instruction) : instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
+ CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
if (instruction_->CanThrowIntoCatchBlock()) {
// Live registers will be restored in the catch block if caught.
SaveLiveRegisters(codegen, instruction_->GetLocations());
}
- x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowNullPointer),
- instruction_,
- instruction_->GetDexPc(),
- this);
+ x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowNullPointer),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
}
bool IsFatal() const OVERRIDE { return true; }
@@ -78,16 +81,16 @@ class DivZeroCheckSlowPathX86_64 : public SlowPathCode {
explicit DivZeroCheckSlowPathX86_64(HDivZeroCheck* instruction) : instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
+ CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
if (instruction_->CanThrowIntoCatchBlock()) {
// Live registers will be restored in the catch block if caught.
SaveLiveRegisters(codegen, instruction_->GetLocations());
}
- x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowDivZero),
- instruction_,
- instruction_->GetDexPc(),
- this);
+ x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowDivZero),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
}
bool IsFatal() const OVERRIDE { return true; }
@@ -139,18 +142,18 @@ class SuspendCheckSlowPathX86_64 : public SlowPathCode {
: instruction_(instruction), successor_(successor) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
+ CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, instruction_->GetLocations());
- x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pTestSuspend),
- instruction_,
- instruction_->GetDexPc(),
- this);
+ x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pTestSuspend),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
RestoreLiveRegisters(codegen, instruction_->GetLocations());
if (successor_ == nullptr) {
__ jmp(GetReturnLabel());
} else {
- __ jmp(x64_codegen->GetLabelOf(successor_));
+ __ jmp(x86_64_codegen->GetLabelOf(successor_));
}
}
@@ -180,7 +183,7 @@ class BoundsCheckSlowPathX86_64 : public SlowPathCode {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
- CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
+ CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
if (instruction_->CanThrowIntoCatchBlock()) {
// Live registers will be restored in the catch block if caught.
@@ -196,8 +199,10 @@ class BoundsCheckSlowPathX86_64 : public SlowPathCode {
locations->InAt(1),
Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
Primitive::kPrimInt);
- x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowArrayBounds),
- instruction_, instruction_->GetDexPc(), this);
+ x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowArrayBounds),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
}
bool IsFatal() const OVERRIDE { return true; }
@@ -222,22 +227,25 @@ class LoadClassSlowPathX86_64 : public SlowPathCode {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = at_->GetLocations();
- CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
+ CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
__ movl(CpuRegister(calling_convention.GetRegisterAt(0)), Immediate(cls_->GetTypeIndex()));
- x64_codegen->InvokeRuntime(do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
- : QUICK_ENTRY_POINT(pInitializeType),
- at_, dex_pc_, this);
+ x86_64_codegen->InvokeRuntime(do_clinit_ ?
+ QUICK_ENTRY_POINT(pInitializeStaticStorage) :
+ QUICK_ENTRY_POINT(pInitializeType),
+ at_,
+ dex_pc_,
+ this);
Location out = locations->Out();
// Move the class to the desired location.
if (out.IsValid()) {
DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
- x64_codegen->Move(out, Location::RegisterLocation(RAX));
+ x86_64_codegen->Move(out, Location::RegisterLocation(RAX));
}
RestoreLiveRegisters(codegen, locations);
@@ -271,18 +279,18 @@ class LoadStringSlowPathX86_64 : public SlowPathCode {
LocationSummary* locations = instruction_->GetLocations();
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
- CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
+ CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
__ movl(CpuRegister(calling_convention.GetRegisterAt(0)),
Immediate(instruction_->GetStringIndex()));
- x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pResolveString),
- instruction_,
- instruction_->GetDexPc(),
- this);
- x64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX));
+ x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pResolveString),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
+ x86_64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX));
RestoreLiveRegisters(codegen, locations);
__ jmp(GetExitLabel());
}
@@ -308,18 +316,9 @@ class TypeCheckSlowPathX86_64 : public SlowPathCode {
DCHECK(instruction_->IsCheckCast()
|| !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
- CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
+ CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
- if (instruction_->IsCheckCast()) {
- // The codegen for the instruction overwrites `temp`, so put it back in place.
- CpuRegister obj = locations->InAt(0).AsRegister<CpuRegister>();
- CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>();
- uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
- __ movl(temp, Address(obj, class_offset));
- __ MaybeUnpoisonHeapReference(temp);
- }
-
if (!is_fatal_) {
SaveLiveRegisters(codegen, locations);
}
@@ -336,21 +335,24 @@ class TypeCheckSlowPathX86_64 : public SlowPathCode {
Primitive::kPrimNot);
if (instruction_->IsInstanceOf()) {
- x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
- instruction_,
- dex_pc,
- this);
+ x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
+ instruction_,
+ dex_pc,
+ this);
+ CheckEntrypointTypes<
+ kQuickInstanceofNonTrivial, uint32_t, const mirror::Class*, const mirror::Class*>();
} else {
DCHECK(instruction_->IsCheckCast());
- x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
- instruction_,
- dex_pc,
- this);
+ x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
+ instruction_,
+ dex_pc,
+ this);
+ CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
}
if (!is_fatal_) {
if (instruction_->IsInstanceOf()) {
- x64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX));
+ x86_64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX));
}
RestoreLiveRegisters(codegen, locations);
@@ -375,15 +377,15 @@ class DeoptimizationSlowPathX86_64 : public SlowPathCode {
: instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
+ CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, instruction_->GetLocations());
DCHECK(instruction_->IsDeoptimize());
HDeoptimize* deoptimize = instruction_->AsDeoptimize();
- x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize),
- deoptimize,
- deoptimize->GetDexPc(),
- this);
+ x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize),
+ deoptimize,
+ deoptimize->GetDexPc(),
+ this);
}
const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathX86_64"; }
@@ -421,11 +423,11 @@ class ArraySetSlowPathX86_64 : public SlowPathCode {
nullptr);
codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
- CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
- x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
- instruction_,
- instruction_->GetDexPc(),
- this);
+ CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
+ x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
RestoreLiveRegisters(codegen, locations);
__ jmp(GetExitLabel());
}
@@ -438,6 +440,219 @@ class ArraySetSlowPathX86_64 : public SlowPathCode {
DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathX86_64);
};
+// Slow path generating a read barrier for a heap reference.
+class ReadBarrierForHeapReferenceSlowPathX86_64 : public SlowPathCode {
+ public:
+ ReadBarrierForHeapReferenceSlowPathX86_64(HInstruction* instruction,
+ Location out,
+ Location ref,
+ Location obj,
+ uint32_t offset,
+ Location index)
+ : instruction_(instruction),
+ out_(out),
+ ref_(ref),
+ obj_(obj),
+ offset_(offset),
+ index_(index) {
+ DCHECK(kEmitCompilerReadBarrier);
+ // If `obj` is equal to `out` or `ref`, it means the initial
+ // object has been overwritten by (or after) the heap object
+ // reference load to be instrumented, e.g.:
+ //
+ // __ movl(out, Address(out, offset));
+ // codegen_->GenerateReadBarrier(instruction, out_loc, out_loc, out_loc, offset);
+ //
+ // In that case, we have lost the information about the original
+ // object, and the emitted read barrier cannot work properly.
+ DCHECK(!obj.Equals(out)) << "obj=" << obj << " out=" << out;
+ DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref;
+ }
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
+ LocationSummary* locations = instruction_->GetLocations();
+ CpuRegister reg_out = out_.AsRegister<CpuRegister>();
+ DCHECK(locations->CanCall());
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out.AsRegister())) << out_;
+ DCHECK(!instruction_->IsInvoke() ||
+ (instruction_->IsInvokeStaticOrDirect() &&
+ instruction_->GetLocations()->Intrinsified()));
+
+ __ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, locations);
+
+ // We may have to change the index's value, but as `index_` is a
+ // constant member (like other "inputs" of this slow path),
+ // we introduce a copy of it, `index`.
+ Location index = index_;
+ if (index_.IsValid()) {
+ // Handle `index_` for HArrayGet and intrinsic UnsafeGetObject.
+ if (instruction_->IsArrayGet()) {
+ // Compute real offset and store it in index_.
+ Register index_reg = index_.AsRegister<CpuRegister>().AsRegister();
+ DCHECK(locations->GetLiveRegisters()->ContainsCoreRegister(index_reg));
+ if (codegen->IsCoreCalleeSaveRegister(index_reg)) {
+ // We are about to change the value of `index_reg` (see the
+ // calls to art::x86_64::X86_64Assembler::shll and
+ // art::x86_64::X86_64Assembler::AddImmediate below), but it
+ // has not been saved by the previous call to
+ // art::SlowPathCode::SaveLiveRegisters, as it is a
+ // callee-save register --
+ // art::SlowPathCode::SaveLiveRegisters does not consider
+ // callee-save registers, as it has been designed with the
+ // assumption that callee-save registers are supposed to be
+ // handled by the called function. So, as a callee-save
+ // register, `index_reg` _would_ eventually be saved onto
+ // the stack, but it would be too late: we would have
+ // changed its value earlier. Therefore, we manually save
+ // it here into another freely available register,
+ // `free_reg`, chosen of course among the caller-save
+ // registers (as a callee-save `free_reg` register would
+ // exhibit the same problem).
+ //
+ // Note we could have requested a temporary register from
+ // the register allocator instead; but we prefer not to, as
+ // this is a slow path, and we know we can find a
+ // caller-save register that is available.
+ Register free_reg = FindAvailableCallerSaveRegister(codegen).AsRegister();
+ __ movl(CpuRegister(free_reg), CpuRegister(index_reg));
+ index_reg = free_reg;
+ index = Location::RegisterLocation(index_reg);
+ } else {
+ // The initial register stored in `index_` has already been
+ // saved in the call to art::SlowPathCode::SaveLiveRegisters
+ // (as it is not a callee-save register), so we can freely
+ // use it.
+ }
+ // Shifting the index value contained in `index_reg` by the
+ // scale factor (2) cannot overflow in practice, as the
+ // runtime is unable to allocate object arrays with a size
+ // larger than 2^26 - 1 (that is, 2^28 - 4 bytes).
+ __ shll(CpuRegister(index_reg), Immediate(TIMES_4));
+ static_assert(
+ sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
+ "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
+ __ AddImmediate(CpuRegister(index_reg), Immediate(offset_));
+ } else {
+ DCHECK(instruction_->IsInvoke());
+ DCHECK(instruction_->GetLocations()->Intrinsified());
+ DCHECK((instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObject) ||
+ (instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile))
+ << instruction_->AsInvoke()->GetIntrinsic();
+ DCHECK_EQ(offset_, 0U);
+ DCHECK(index_.IsRegister());
+ }
+ }
+
+ // We're moving two or three locations to locations that could
+ // overlap, so we need a parallel move resolver.
+ InvokeRuntimeCallingConvention calling_convention;
+ HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ parallel_move.AddMove(ref_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ Primitive::kPrimNot,
+ nullptr);
+ parallel_move.AddMove(obj_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+ Primitive::kPrimNot,
+ nullptr);
+ if (index.IsValid()) {
+ parallel_move.AddMove(index,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(2)),
+ Primitive::kPrimInt,
+ nullptr);
+ codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+ } else {
+ codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+ __ movl(CpuRegister(calling_convention.GetRegisterAt(2)), Immediate(offset_));
+ }
+ x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pReadBarrierSlow),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
+ CheckEntrypointTypes<
+ kQuickReadBarrierSlow, mirror::Object*, mirror::Object*, mirror::Object*, uint32_t>();
+ x86_64_codegen->Move(out_, Location::RegisterLocation(RAX));
+
+ RestoreLiveRegisters(codegen, locations);
+ __ jmp(GetExitLabel());
+ }
+
+ const char* GetDescription() const OVERRIDE {
+ return "ReadBarrierForHeapReferenceSlowPathX86_64";
+ }
+
+ private:
+ CpuRegister FindAvailableCallerSaveRegister(CodeGenerator* codegen) {
+ size_t ref = static_cast<int>(ref_.AsRegister<CpuRegister>().AsRegister());
+ size_t obj = static_cast<int>(obj_.AsRegister<CpuRegister>().AsRegister());
+ for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) {
+ if (i != ref && i != obj && !codegen->IsCoreCalleeSaveRegister(i)) {
+ return static_cast<CpuRegister>(i);
+ }
+ }
+ // We shall never fail to find a free caller-save register, as
+ // there are more than two core caller-save registers on x86-64
+ // (meaning it is possible to find one which is different from
+ // `ref` and `obj`).
+ DCHECK_GT(codegen->GetNumberOfCoreCallerSaveRegisters(), 2u);
+ LOG(FATAL) << "Could not find a free caller-save register";
+ UNREACHABLE();
+ }
+
+ HInstruction* const instruction_;
+ const Location out_;
+ const Location ref_;
+ const Location obj_;
+ const uint32_t offset_;
+ // An additional location containing an index to an array.
+ // Only used for HArrayGet and the UnsafeGetObject &
+ // UnsafeGetObjectVolatile intrinsics.
+ const Location index_;
+
+ DISALLOW_COPY_AND_ASSIGN(ReadBarrierForHeapReferenceSlowPathX86_64);
+};
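The register hunt in FindAvailableCallerSaveRegister above boils down to scanning for any core register distinct from `ref` and `obj` that is not callee-save. A stand-alone model of that loop; the register count and the callee-save predicate are invented for illustration and do not reflect the real x86-64 ABI tables.

#include <cstddef>

namespace slow_path_sketch {

constexpr size_t kNumberOfCoreRegisters = 16;  // x86-64 has 16 core registers

// Invented predicate standing in for codegen->IsCoreCalleeSaveRegister().
bool IsCoreCalleeSave(size_t reg) {
  return reg == 3 || reg == 5 || (reg >= 12 && reg <= 15);  // assumed set
}

// Returns the first caller-save register different from `ref` and `obj`,
// or -1 if none exists (which cannot happen when more than two core
// registers are caller-save).
int FindAvailableCallerSave(size_t ref, size_t obj) {
  for (size_t i = 0; i < kNumberOfCoreRegisters; ++i) {
    if (i != ref && i != obj && !IsCoreCalleeSave(i)) {
      return static_cast<int>(i);
    }
  }
  return -1;
}

}  // namespace slow_path_sketch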
+
+// Slow path generating a read barrier for a GC root.
+class ReadBarrierForRootSlowPathX86_64 : public SlowPathCode {
+ public:
+ ReadBarrierForRootSlowPathX86_64(HInstruction* instruction, Location out, Location root)
+ : instruction_(instruction), out_(out), root_(root) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ DCHECK(locations->CanCall());
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(out_.reg()));
+ DCHECK(instruction_->IsLoadClass() || instruction_->IsLoadString());
+
+ __ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, locations);
+
+ InvokeRuntimeCallingConvention calling_convention;
+ CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
+ x86_64_codegen->Move(Location::RegisterLocation(calling_convention.GetRegisterAt(0)), root_);
+ x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pReadBarrierForRootSlow),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
+ CheckEntrypointTypes<kQuickReadBarrierForRootSlow, mirror::Object*, GcRoot<mirror::Object>*>();
+ x86_64_codegen->Move(out_, Location::RegisterLocation(RAX));
+
+ RestoreLiveRegisters(codegen, locations);
+ __ jmp(GetExitLabel());
+ }
+
+ const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathX86_64"; }
+
+ private:
+ HInstruction* const instruction_;
+ const Location out_;
+ const Location root_;
+
+ DISALLOW_COPY_AND_ASSIGN(ReadBarrierForRootSlowPathX86_64);
+};
+
#undef __
#define __ down_cast<X86_64Assembler*>(GetAssembler())->
@@ -514,12 +729,12 @@ void CodeGeneratorX86_64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invo
__ Bind(&method_patches_.back().label); // Bind the label at the end of the "movl" insn.
break;
case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
- pc_rel_dex_cache_patches_.emplace_back(*invoke->GetTargetMethod().dex_file,
- invoke->GetDexCacheArrayOffset());
+ pc_relative_dex_cache_patches_.emplace_back(*invoke->GetTargetMethod().dex_file,
+ invoke->GetDexCacheArrayOffset());
__ movq(temp.AsRegister<CpuRegister>(),
Address::Absolute(kDummy32BitOffset, false /* no_rip */));
// Bind the label at the end of the "movl" insn.
- __ Bind(&pc_rel_dex_cache_patches_.back().label);
+ __ Bind(&pc_relative_dex_cache_patches_.back().label);
break;
case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
Location current_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
@@ -533,7 +748,7 @@ void CodeGeneratorX86_64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invo
method_reg = reg.AsRegister();
__ movq(reg, Address(CpuRegister(RSP), kCurrentMethodStackOffset));
}
- // temp = temp->dex_cache_resolved_methods_;
+ // /* ArtMethod*[] */ temp = temp.ptr_sized_fields_->dex_cache_resolved_methods_;
__ movq(reg,
Address(CpuRegister(method_reg),
ArtMethod::DexCacheResolvedMethodsOffset(kX86_64PointerSize).SizeValue()));
@@ -578,10 +793,17 @@ void CodeGeneratorX86_64::GenerateVirtualCall(HInvokeVirtual* invoke, Location t
LocationSummary* locations = invoke->GetLocations();
Location receiver = locations->InAt(0);
size_t class_offset = mirror::Object::ClassOffset().SizeValue();
- // temp = object->GetClass();
DCHECK(receiver.IsRegister());
+ // /* HeapReference<Class> */ temp = receiver->klass_
__ movl(temp, Address(receiver.AsRegister<CpuRegister>(), class_offset));
MaybeRecordImplicitNullCheck(invoke);
+ // Instead of simply (possibly) unpoisoning `temp` here, we should
+ // emit a read barrier for the previous class reference load.
+ // However, this is not required in practice, as this is an
+ // intermediate/temporary reference and because the current
+ // concurrent copying collector keeps the from-space memory
+ // intact/accessible until the end of the marking phase (the
+ // concurrent copying collector may not do so in the future).
__ MaybeUnpoisonHeapReference(temp);
// temp = temp->GetMethodAt(method_offset);
__ movq(temp, Address(temp, method_offset));
@@ -593,28 +815,27 @@ void CodeGeneratorX86_64::GenerateVirtualCall(HInvokeVirtual* invoke, Location t
void CodeGeneratorX86_64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) {
DCHECK(linker_patches->empty());
size_t size =
- method_patches_.size() + relative_call_patches_.size() + pc_rel_dex_cache_patches_.size();
+ method_patches_.size() +
+ relative_call_patches_.size() +
+ pc_relative_dex_cache_patches_.size();
linker_patches->reserve(size);
+ // The label points to the end of the "movl" insn but the literal offset for method
+ // patch needs to point to the embedded constant which occupies the last 4 bytes.
+ constexpr uint32_t kLabelPositionToLiteralOffsetAdjustment = 4u;
for (const MethodPatchInfo<Label>& info : method_patches_) {
- // The label points to the end of the "movl" instruction but the literal offset for method
- // patch x86 needs to point to the embedded constant which occupies the last 4 bytes.
- uint32_t literal_offset = info.label.Position() - 4;
+ uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
linker_patches->push_back(LinkerPatch::MethodPatch(literal_offset,
info.target_method.dex_file,
info.target_method.dex_method_index));
}
for (const MethodPatchInfo<Label>& info : relative_call_patches_) {
- // The label points to the end of the "call" instruction but the literal offset for method
- // patch x86 needs to point to the embedded constant which occupies the last 4 bytes.
- uint32_t literal_offset = info.label.Position() - 4;
+ uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
linker_patches->push_back(LinkerPatch::RelativeCodePatch(literal_offset,
info.target_method.dex_file,
info.target_method.dex_method_index));
}
- for (const PcRelativeDexCacheAccessInfo& info : pc_rel_dex_cache_patches_) {
- // The label points to the end of the "mov" instruction but the literal offset for method
- // patch x86 needs to point to the embedded constant which occupies the last 4 bytes.
- uint32_t literal_offset = info.label.Position() - 4;
+ for (const PcRelativeDexCacheAccessInfo& info : pc_relative_dex_cache_patches_) {
+ uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
linker_patches->push_back(LinkerPatch::DexCacheArrayPatch(literal_offset,
&info.target_dex_file,
info.label.Position(),
@@ -673,9 +894,9 @@ static constexpr int kNumberOfCpuRegisterPairs = 0;
// Use a fake return address register to mimic Quick.
static constexpr Register kFakeReturnRegister = Register(kLastCpuRegister + 1);
CodeGeneratorX86_64::CodeGeneratorX86_64(HGraph* graph,
- const X86_64InstructionSetFeatures& isa_features,
- const CompilerOptions& compiler_options,
- OptimizingCompilerStats* stats)
+ const X86_64InstructionSetFeatures& isa_features,
+ const CompilerOptions& compiler_options,
+ OptimizingCompilerStats* stats)
: CodeGenerator(graph,
kNumberOfCpuRegisters,
kNumberOfFloatRegisters,
@@ -695,7 +916,7 @@ CodeGeneratorX86_64::CodeGeneratorX86_64(HGraph* graph,
constant_area_start_(0),
method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_rel_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
fixups_to_jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
AddAllocatedRegister(Location::RegisterLocation(kFakeReturnRegister));
}
@@ -729,7 +950,7 @@ Location CodeGeneratorX86_64::AllocateFreeRegister(Primitive::Type type) const {
LOG(FATAL) << "Unreachable type " << type;
}
- return Location();
+ return Location::NoLocation();
}
void CodeGeneratorX86_64::SetupBlockedRegisters(bool is_baseline) const {
@@ -1837,7 +2058,7 @@ Location InvokeDexCallingConventionVisitorX86_64::GetNextLocation(Primitive::Typ
LOG(FATAL) << "Unexpected parameter type " << type;
break;
}
- return Location();
+ return Location::NoLocation();
}
void LocationsBuilderX86_64::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
@@ -1908,7 +2129,6 @@ void InstructionCodeGeneratorX86_64::VisitInvokeVirtual(HInvokeVirtual* invoke)
}
codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0));
-
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
@@ -1921,31 +2141,41 @@ void LocationsBuilderX86_64::VisitInvokeInterface(HInvokeInterface* invoke) {
void InstructionCodeGeneratorX86_64::VisitInvokeInterface(HInvokeInterface* invoke) {
// TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
- CpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<CpuRegister>();
+ LocationSummary* locations = invoke->GetLocations();
+ CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>();
+ CpuRegister hidden_reg = locations->GetTemp(1).AsRegister<CpuRegister>();
uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
invoke->GetImtIndex() % mirror::Class::kImtSize, kX86_64PointerSize).Uint32Value();
- LocationSummary* locations = invoke->GetLocations();
Location receiver = locations->InAt(0);
size_t class_offset = mirror::Object::ClassOffset().SizeValue();
- // Set the hidden argument.
- CpuRegister hidden_reg = invoke->GetLocations()->GetTemp(1).AsRegister<CpuRegister>();
+ // Set the hidden argument. It is safe to do this here, as RAX
+ // won't be modified thereafter, before the `call` instruction.
+ DCHECK_EQ(RAX, hidden_reg.AsRegister());
codegen_->Load64BitValue(hidden_reg, invoke->GetDexMethodIndex());
- // temp = object->GetClass();
if (receiver.IsStackSlot()) {
__ movl(temp, Address(CpuRegister(RSP), receiver.GetStackIndex()));
+ // /* HeapReference<Class> */ temp = temp->klass_
__ movl(temp, Address(temp, class_offset));
} else {
+ // /* HeapReference<Class> */ temp = receiver->klass_
__ movl(temp, Address(receiver.AsRegister<CpuRegister>(), class_offset));
}
codegen_->MaybeRecordImplicitNullCheck(invoke);
+ // Instead of simply (possibly) unpoisoning `temp` here, we should
+ // emit a read barrier for the previous class reference load.
+ // However, this is not required in practice, as this is an
+ // intermediate/temporary reference and because the current
+ // concurrent copying collector keeps the from-space memory
+ // intact/accessible until the end of the marking phase (the
+ // concurrent copying collector may not do so in the future).
__ MaybeUnpoisonHeapReference(temp);
// temp = temp->GetImtEntryAt(method_offset);
__ movq(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
- __ call(Address(temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- kX86_64WordSize).SizeValue()));
+ __ call(Address(temp,
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86_64WordSize).SizeValue()));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -3687,13 +3917,23 @@ void InstructionCodeGeneratorX86_64::GenerateMemoryBarrier(MemBarrierKind kind)
void LocationsBuilderX86_64::HandleFieldGet(HInstruction* instruction) {
DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
+ bool object_field_get_with_read_barrier =
+ kEmitCompilerReadBarrier && (instruction->GetType() == Primitive::kPrimNot);
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetArena()) LocationSummary(instruction,
+ object_field_get_with_read_barrier ?
+ LocationSummary::kCallOnSlowPath :
+ LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
if (Primitive::IsFloatingPointType(instruction->GetType())) {
locations->SetOut(Location::RequiresFpuRegister());
} else {
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ // The output overlaps for an object field get when read barriers
+ // are enabled: we do not want the move to overwrite the object's
+ // location, as we need it to emit the read barrier.
+ locations->SetOut(
+ Location::RequiresRegister(),
+ object_field_get_with_read_barrier ? Location::kOutputOverlap : Location::kNoOutputOverlap);
}
}
@@ -3702,7 +3942,8 @@ void InstructionCodeGeneratorX86_64::HandleFieldGet(HInstruction* instruction,
DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
LocationSummary* locations = instruction->GetLocations();
- CpuRegister base = locations->InAt(0).AsRegister<CpuRegister>();
+ Location base_loc = locations->InAt(0);
+ CpuRegister base = base_loc.AsRegister<CpuRegister>();
Location out = locations->Out();
bool is_volatile = field_info.IsVolatile();
Primitive::Type field_type = field_info.GetFieldType();
@@ -3762,7 +4003,7 @@ void InstructionCodeGeneratorX86_64::HandleFieldGet(HInstruction* instruction,
}
if (field_type == Primitive::kPrimNot) {
- __ MaybeUnpoisonHeapReference(out.AsRegister<CpuRegister>());
+ codegen_->MaybeGenerateReadBarrier(instruction, out, out, base_loc, offset);
}
}
@@ -4080,20 +4321,31 @@ void InstructionCodeGeneratorX86_64::VisitNullCheck(HNullCheck* instruction) {
}
void LocationsBuilderX86_64::VisitArrayGet(HArrayGet* instruction) {
+ bool object_array_get_with_read_barrier =
+ kEmitCompilerReadBarrier && (instruction->GetType() == Primitive::kPrimNot);
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetArena()) LocationSummary(instruction,
+ object_array_get_with_read_barrier ?
+ LocationSummary::kCallOnSlowPath :
+ LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
if (Primitive::IsFloatingPointType(instruction->GetType())) {
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
} else {
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ // The output overlaps for an object array get when read barriers
+ // are enabled: we do not want the move to overwrite the array's
+ // location, as we need it to emit the read barrier.
+ locations->SetOut(
+ Location::RequiresRegister(),
+ object_array_get_with_read_barrier ? Location::kOutputOverlap : Location::kNoOutputOverlap);
}
}
void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- CpuRegister obj = locations->InAt(0).AsRegister<CpuRegister>();
+ Location obj_loc = locations->InAt(0);
+ CpuRegister obj = obj_loc.AsRegister<CpuRegister>();
Location index = locations->InAt(1);
Primitive::Type type = instruction->GetType();
@@ -4148,8 +4400,9 @@ void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) {
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
- static_assert(sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
- "art::mirror::HeapReference<mirror::Object> and int32_t have different sizes.");
+ static_assert(
+ sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
+ "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
CpuRegister out = locations->Out().AsRegister<CpuRegister>();
if (index.IsConstant()) {
@@ -4204,8 +4457,17 @@ void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) {
codegen_->MaybeRecordImplicitNullCheck(instruction);
if (type == Primitive::kPrimNot) {
- CpuRegister out = locations->Out().AsRegister<CpuRegister>();
- __ MaybeUnpoisonHeapReference(out);
+ static_assert(
+ sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
+ "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+ Location out = locations->Out();
+ if (index.IsConstant()) {
+ uint32_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+ codegen_->MaybeGenerateReadBarrier(instruction, out, out, obj_loc, offset);
+ } else {
+ codegen_->MaybeGenerateReadBarrier(instruction, out, out, obj_loc, data_offset, index);
+ }
}
}
@@ -4215,10 +4477,14 @@ void LocationsBuilderX86_64::VisitArraySet(HArraySet* instruction) {
bool needs_write_barrier =
CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
bool may_need_runtime_call = instruction->NeedsTypeCheck();
+ bool object_array_set_with_read_barrier =
+ kEmitCompilerReadBarrier && (value_type == Primitive::kPrimNot);
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
instruction,
- may_need_runtime_call ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall);
+ (may_need_runtime_call || object_array_set_with_read_barrier) ?
+ LocationSummary::kCallOnSlowPath :
+ LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
@@ -4230,18 +4496,24 @@ void LocationsBuilderX86_64::VisitArraySet(HArraySet* instruction) {
if (needs_write_barrier) {
// Temporary registers for the write barrier.
- locations->AddTemp(Location::RequiresRegister()); // Possibly used for ref. poisoning too.
+
+ // This first temporary register is possibly used for heap
+ // reference poisoning and/or read barrier emission too.
+ locations->AddTemp(Location::RequiresRegister());
+ // This second temporary register is possibly used for read
+ // barrier emission too.
locations->AddTemp(Location::RequiresRegister());
}
}
void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- CpuRegister array = locations->InAt(0).AsRegister<CpuRegister>();
+ Location array_loc = locations->InAt(0);
+ CpuRegister array = array_loc.AsRegister<CpuRegister>();
Location index = locations->InAt(1);
Location value = locations->InAt(2);
Primitive::Type value_type = instruction->GetComponentType();
- bool may_need_runtime_call = locations->CanCall();
+ bool may_need_runtime_call = instruction->NeedsTypeCheck();
bool needs_write_barrier =
CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
@@ -4285,6 +4557,7 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
Address address = index.IsConstant()
? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + offset)
: Address(array, index.AsRegister<CpuRegister>(), TIMES_4, offset);
+
if (!value.IsRegister()) {
// Just setting null.
DCHECK(instruction->InputAt(2)->IsNullConstant());
@@ -4313,22 +4586,62 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
__ Bind(&not_null);
}
- __ movl(temp, Address(array, class_offset));
- codegen_->MaybeRecordImplicitNullCheck(instruction);
- __ MaybeUnpoisonHeapReference(temp);
- __ movl(temp, Address(temp, component_offset));
- // No need to poison/unpoison, we're comparing two poisoned references.
- __ cmpl(temp, Address(register_value, class_offset));
- if (instruction->StaticTypeOfArrayIsObjectArray()) {
- __ j(kEqual, &do_put);
- __ MaybeUnpoisonHeapReference(temp);
- __ movl(temp, Address(temp, super_offset));
- // No need to unpoison the result, we're comparing against null.
- __ testl(temp, temp);
- __ j(kNotEqual, slow_path->GetEntryLabel());
- __ Bind(&do_put);
+ if (kEmitCompilerReadBarrier) {
+ // When read barriers are enabled, the type checking
+ // instrumentation requires two read barriers:
+ //
+ // __ movl(temp2, temp);
+ // // /* HeapReference<Class> */ temp = temp->component_type_
+ // __ movl(temp, Address(temp, component_offset));
+ // codegen_->GenerateReadBarrier(
+ // instruction, temp_loc, temp_loc, temp2_loc, component_offset);
+ //
+ // // /* HeapReference<Class> */ temp2 = register_value->klass_
+ // __ movl(temp2, Address(register_value, class_offset));
+ // codegen_->GenerateReadBarrier(
+ // instruction, temp2_loc, temp2_loc, value, class_offset, temp_loc);
+ //
+ // __ cmpl(temp, temp2);
+ //
+ // However, the second read barrier may trash `temp`, as it
+ // is a temporary register, and as such would not be saved
+ // along with live registers before calling the runtime (nor
+ // restored afterwards). So in this case, we bail out and
+ // delegate the work to the array set slow path.
+ //
+ // TODO: Extend the register allocator to support a new
+ // "(locally) live temp" location so as to avoid always
+ // going into the slow path when read barriers are enabled.
+ __ jmp(slow_path->GetEntryLabel());
} else {
- __ j(kNotEqual, slow_path->GetEntryLabel());
+ // /* HeapReference<Class> */ temp = array->klass_
+ __ movl(temp, Address(array, class_offset));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ __ MaybeUnpoisonHeapReference(temp);
+
+ // /* HeapReference<Class> */ temp = temp->component_type_
+ __ movl(temp, Address(temp, component_offset));
+ // If heap poisoning is enabled, no need to unpoison `temp`
+ // nor the object reference in `register_value->klass`, as
+ // we are comparing two poisoned references.
+ __ cmpl(temp, Address(register_value, class_offset));
+
+ if (instruction->StaticTypeOfArrayIsObjectArray()) {
+ __ j(kEqual, &do_put);
+ // If heap poisoning is enabled, the `temp` reference has
+ // not been unpoisoned yet; unpoison it now.
+ __ MaybeUnpoisonHeapReference(temp);
+
+ // /* HeapReference<Class> */ temp = temp->super_class_
+ __ movl(temp, Address(temp, super_offset));
+ // If heap poisoning is enabled, no need to unpoison
+ // `temp`, as we are comparing against null below.
+ __ testl(temp, temp);
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ Bind(&do_put);
+ } else {
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ }
}
}
@@ -4354,6 +4667,7 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
break;
}
+
case Primitive::kPrimInt: {
uint32_t offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
Address address = index.IsConstant()
@@ -4803,7 +5117,8 @@ void LocationsBuilderX86_64::VisitLoadClass(HLoadClass* cls) {
CodeGenerator::CreateLoadClassLocationSummary(
cls,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
- Location::RegisterLocation(RAX));
+ Location::RegisterLocation(RAX),
+ /* code_generator_supports_read_barrier */ true);
}
void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) {
@@ -4817,18 +5132,40 @@ void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) {
return;
}
- CpuRegister out = locations->Out().AsRegister<CpuRegister>();
+ Location out_loc = locations->Out();
+ CpuRegister out = out_loc.AsRegister<CpuRegister>();
CpuRegister current_method = locations->InAt(0).AsRegister<CpuRegister>();
+
if (cls->IsReferrersClass()) {
DCHECK(!cls->CanCallRuntime());
DCHECK(!cls->MustGenerateClinitCheck());
- __ movl(out, Address(current_method, ArtMethod::DeclaringClassOffset().Int32Value()));
+ uint32_t declaring_class_offset = ArtMethod::DeclaringClassOffset().Int32Value();
+ if (kEmitCompilerReadBarrier) {
+ // /* GcRoot<mirror::Class>* */ out = &(current_method->declaring_class_)
+ __ leaq(out, Address(current_method, declaring_class_offset));
+ // /* mirror::Class* */ out = out->Read()
+ codegen_->GenerateReadBarrierForRoot(cls, out_loc, out_loc);
+ } else {
+ // /* GcRoot<mirror::Class> */ out = current_method->declaring_class_
+ __ movl(out, Address(current_method, declaring_class_offset));
+ }
} else {
DCHECK(cls->CanCallRuntime());
- __ movq(out, Address(
- current_method, ArtMethod::DexCacheResolvedTypesOffset(kX86_64PointerSize).Int32Value()));
- __ movl(out, Address(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));
- // TODO: We will need a read barrier here.
+ // /* GcRoot<mirror::Class>[] */ out =
+ // current_method.ptr_sized_fields_->dex_cache_resolved_types_
+ __ movq(out, Address(current_method,
+ ArtMethod::DexCacheResolvedTypesOffset(kX86_64PointerSize).Int32Value()));
+
+ size_t cache_offset = CodeGenerator::GetCacheOffset(cls->GetTypeIndex());
+ if (kEmitCompilerReadBarrier) {
+ // /* GcRoot<mirror::Class>* */ out = &out[type_index]
+ __ leaq(out, Address(out, cache_offset));
+ // /* mirror::Class* */ out = out->Read()
+ codegen_->GenerateReadBarrierForRoot(cls, out_loc, out_loc);
+ } else {
+ // /* GcRoot<mirror::Class> */ out = out[type_index]
+ __ movl(out, Address(out, cache_offset));
+ }
SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86_64(
cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
@@ -4873,12 +5210,35 @@ void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) {
codegen_->AddSlowPath(slow_path);
LocationSummary* locations = load->GetLocations();
- CpuRegister out = locations->Out().AsRegister<CpuRegister>();
+ Location out_loc = locations->Out();
+ CpuRegister out = out_loc.AsRegister<CpuRegister>();
CpuRegister current_method = locations->InAt(0).AsRegister<CpuRegister>();
- __ movl(out, Address(current_method, ArtMethod::DeclaringClassOffset().Int32Value()));
- __ movq(out, Address(out, mirror::Class::DexCacheStringsOffset().Int32Value()));
- __ movl(out, Address(out, CodeGenerator::GetCacheOffset(load->GetStringIndex())));
- // TODO: We will need a read barrier here.
+
+ uint32_t declaring_class_offset = ArtMethod::DeclaringClassOffset().Int32Value();
+ if (kEmitCompilerReadBarrier) {
+ // /* GcRoot<mirror::Class>* */ out = &(current_method->declaring_class_)
+ __ leaq(out, Address(current_method, declaring_class_offset));
+ // /* mirror::Class* */ out = out->Read()
+ codegen_->GenerateReadBarrierForRoot(load, out_loc, out_loc);
+ } else {
+ // /* GcRoot<mirror::Class> */ out = current_method->declaring_class_
+ __ movl(out, Address(current_method, declaring_class_offset));
+ }
+
+ // /* GcRoot<mirror::String>[] */ out = out->dex_cache_strings_
+ __ movq(out, Address(out, mirror::Class::DexCacheStringsOffset().Uint32Value()));
+
+ size_t cache_offset = CodeGenerator::GetCacheOffset(load->GetStringIndex());
+ if (kEmitCompilerReadBarrier) {
+ // /* GcRoot<mirror::String>* */ out = &out[string_index]
+ __ leaq(out, Address(out, cache_offset));
+ // /* mirror::String* */ out = out->Read()
+ codegen_->GenerateReadBarrierForRoot(load, out_loc, out_loc);
+ } else {
+ // /* GcRoot<mirror::String> */ out = out[string_index]
+ __ movl(out, Address(out, cache_offset));
+ }
+
__ testl(out, out);
__ j(kEqual, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
@@ -4922,40 +5282,44 @@ void InstructionCodeGeneratorX86_64::VisitThrow(HThrow* instruction) {
void LocationsBuilderX86_64::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
- switch (instruction->GetTypeCheckKind()) {
+ TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
+ switch (type_check_kind) {
case TypeCheckKind::kExactCheck:
case TypeCheckKind::kAbstractClassCheck:
case TypeCheckKind::kClassHierarchyCheck:
case TypeCheckKind::kArrayObjectCheck:
- call_kind = LocationSummary::kNoCall;
+ call_kind =
+ kEmitCompilerReadBarrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall;
break;
+ case TypeCheckKind::kArrayCheck:
case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
- call_kind = LocationSummary::kCall;
- break;
- case TypeCheckKind::kArrayCheck:
call_kind = LocationSummary::kCallOnSlowPath;
break;
}
+
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
- if (call_kind != LocationSummary::kCall) {
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::Any());
- // Note that TypeCheckSlowPathX86_64 uses this register too.
- locations->SetOut(Location::RequiresRegister());
- } else {
- InvokeRuntimeCallingConvention calling_convention;
- locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
- locations->SetOut(Location::RegisterLocation(RAX));
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::Any());
+ // Note that TypeCheckSlowPathX86_64 uses this "out" register too.
+ locations->SetOut(Location::RequiresRegister());
+ // When read barriers are enabled, we need a temporary register for
+ // some cases.
+ if (kEmitCompilerReadBarrier &&
+ (type_check_kind == TypeCheckKind::kAbstractClassCheck ||
+ type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
+ type_check_kind == TypeCheckKind::kArrayObjectCheck)) {
+ locations->AddTemp(Location::RequiresRegister());
}
}
void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary* locations = instruction->GetLocations();
- CpuRegister obj = locations->InAt(0).AsRegister<CpuRegister>();
+ Location obj_loc = locations->InAt(0);
+ CpuRegister obj = obj_loc.AsRegister<CpuRegister>();
Location cls = locations->InAt(1);
- CpuRegister out = locations->Out().AsRegister<CpuRegister>();
+ Location out_loc = locations->Out();
+ CpuRegister out = out_loc.AsRegister<CpuRegister>();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
@@ -4970,15 +5334,9 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
__ j(kEqual, &zero);
}
- // In case of an interface/unresolved check, we put the object class into the object register.
- // This is safe, as the register is caller-save, and the object must be in another
- // register if it survives the runtime call.
- CpuRegister target = (instruction->GetTypeCheckKind() == TypeCheckKind::kInterfaceCheck) ||
- (instruction->GetTypeCheckKind() == TypeCheckKind::kUnresolvedCheck)
- ? obj
- : out;
- __ movl(target, Address(obj, class_offset));
- __ MaybeUnpoisonHeapReference(target);
+ // /* HeapReference<Class> */ out = obj->klass_
+ __ movl(out, Address(obj, class_offset));
+ codegen_->MaybeGenerateReadBarrier(instruction, out_loc, out_loc, obj_loc, class_offset);
switch (instruction->GetTypeCheckKind()) {
case TypeCheckKind::kExactCheck: {
@@ -5000,13 +5358,23 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
}
break;
}
+
case TypeCheckKind::kAbstractClassCheck: {
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
NearLabel loop, success;
__ Bind(&loop);
+ Location temp_loc = kEmitCompilerReadBarrier ? locations->GetTemp(0) : Location::NoLocation();
+ if (kEmitCompilerReadBarrier) {
+ // Save the value of `out` into `temp` before overwriting it
+ // in the following move operation, as we will need it for the
+ // read barrier below.
+ CpuRegister temp = temp_loc.AsRegister<CpuRegister>();
+ __ movl(temp, out);
+ }
+ // /* HeapReference<Class> */ out = out->super_class_
__ movl(out, Address(out, super_offset));
- __ MaybeUnpoisonHeapReference(out);
+ codegen_->MaybeGenerateReadBarrier(instruction, out_loc, out_loc, temp_loc, super_offset);
__ testl(out, out);
// If `out` is null, we use it for the result, and jump to `done`.
__ j(kEqual, &done);
@@ -5023,6 +5391,7 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
}
break;
}
+
case TypeCheckKind::kClassHierarchyCheck: {
// Walk over the class hierarchy to find a match.
NearLabel loop, success;
@@ -5034,8 +5403,17 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
__ cmpl(out, Address(CpuRegister(RSP), cls.GetStackIndex()));
}
__ j(kEqual, &success);
+ Location temp_loc = kEmitCompilerReadBarrier ? locations->GetTemp(0) : Location::NoLocation();
+ if (kEmitCompilerReadBarrier) {
+ // Save the value of `out` into `temp` before overwriting it
+ // in the following move operation, as we will need it for the
+ // read barrier below.
+ CpuRegister temp = temp_loc.AsRegister<CpuRegister>();
+ __ movl(temp, out);
+ }
+ // /* HeapReference<Class> */ out = out->super_class_
__ movl(out, Address(out, super_offset));
- __ MaybeUnpoisonHeapReference(out);
+ codegen_->MaybeGenerateReadBarrier(instruction, out_loc, out_loc, temp_loc, super_offset);
__ testl(out, out);
__ j(kNotEqual, &loop);
// If `out` is null, we use it for the result, and jump to `done`.
@@ -5047,6 +5425,7 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
}
break;
}
+
case TypeCheckKind::kArrayObjectCheck: {
// Do an exact check.
NearLabel exact_check;
@@ -5057,9 +5436,18 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
__ cmpl(out, Address(CpuRegister(RSP), cls.GetStackIndex()));
}
__ j(kEqual, &exact_check);
- // Otherwise, we need to check that the object's class is a non primitive array.
+ // Otherwise, we need to check that the object's class is a non-primitive array.
+ Location temp_loc = kEmitCompilerReadBarrier ? locations->GetTemp(0) : Location::NoLocation();
+ if (kEmitCompilerReadBarrier) {
+ // Save the value of `out` into `temp` before overwriting it
+ // in the following move operation, as we will need it for the
+ // read barrier below.
+ CpuRegister temp = temp_loc.AsRegister<CpuRegister>();
+ __ movl(temp, out);
+ }
+ // /* HeapReference<Class> */ out = out->component_type_
__ movl(out, Address(out, component_offset));
- __ MaybeUnpoisonHeapReference(out);
+ codegen_->MaybeGenerateReadBarrier(instruction, out_loc, out_loc, temp_loc, component_offset);
__ testl(out, out);
// If `out` is null, we use it for the result, and jump to `done`.
__ j(kEqual, &done);
@@ -5070,6 +5458,7 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
__ jmp(&done);
break;
}
+
case TypeCheckKind::kArrayCheck: {
if (cls.IsRegister()) {
__ cmpl(out, cls.AsRegister<CpuRegister>());
@@ -5078,8 +5467,8 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
__ cmpl(out, Address(CpuRegister(RSP), cls.GetStackIndex()));
}
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(
- instruction, /* is_fatal */ false);
+ slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(instruction,
+ /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ j(kNotEqual, slow_path->GetEntryLabel());
__ movl(out, Immediate(1));
@@ -5088,13 +5477,25 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
}
break;
}
+
case TypeCheckKind::kUnresolvedCheck:
- case TypeCheckKind::kInterfaceCheck:
- default: {
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
- instruction,
- instruction->GetDexPc(),
- nullptr);
+ case TypeCheckKind::kInterfaceCheck: {
+ // Note that we indeed only call on slow path, but we always go
+ // into the slow path for the unresolved & interface check
+ // cases.
+ //
+ // We cannot directly call the InstanceofNonTrivial runtime
+ // entry point without resorting to a type checking slow path
+ // here (i.e. by calling InvokeRuntime directly), as it would
+    // require assigning fixed registers for the inputs of this
+    // HInstanceOf instruction (following the runtime calling
+    // convention), which might be clobbered by the potential first
+ // read barrier emission at the beginning of this method.
+ DCHECK(locations->OnlyCallsOnSlowPath());
+ slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(instruction,
+ /* is_fatal */ false);
+ codegen_->AddSlowPath(slow_path);
+ __ jmp(slow_path->GetEntryLabel());
if (zero.IsLinked()) {
__ jmp(&done);
}
@@ -5119,58 +5520,60 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
void LocationsBuilderX86_64::VisitCheckCast(HCheckCast* instruction) {
LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
bool throws_into_catch = instruction->CanThrowIntoCatchBlock();
-
- switch (instruction->GetTypeCheckKind()) {
+ TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
+ switch (type_check_kind) {
case TypeCheckKind::kExactCheck:
case TypeCheckKind::kAbstractClassCheck:
case TypeCheckKind::kClassHierarchyCheck:
case TypeCheckKind::kArrayObjectCheck:
- call_kind = throws_into_catch
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
+ call_kind = (throws_into_catch || kEmitCompilerReadBarrier) ?
+ LocationSummary::kCallOnSlowPath :
+ LocationSummary::kNoCall; // In fact, call on a fatal (non-returning) slow path.
break;
+ case TypeCheckKind::kArrayCheck:
case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
- call_kind = LocationSummary::kCall;
- break;
- case TypeCheckKind::kArrayCheck:
call_kind = LocationSummary::kCallOnSlowPath;
break;
}
-
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
- instruction, call_kind);
- if (call_kind != LocationSummary::kCall) {
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::Any());
- // Note that TypeCheckSlowPathX86_64 uses this register too.
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::Any());
+ // Note that TypeCheckSlowPathX86_64 uses this "temp" register too.
+ locations->AddTemp(Location::RequiresRegister());
+ // When read barriers are enabled, we need an additional temporary
+ // register for some cases.
+ if (kEmitCompilerReadBarrier &&
+ (type_check_kind == TypeCheckKind::kAbstractClassCheck ||
+ type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
+ type_check_kind == TypeCheckKind::kArrayObjectCheck)) {
locations->AddTemp(Location::RequiresRegister());
- } else {
- InvokeRuntimeCallingConvention calling_convention;
- locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
}
}
void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) {
LocationSummary* locations = instruction->GetLocations();
- CpuRegister obj = locations->InAt(0).AsRegister<CpuRegister>();
+ Location obj_loc = locations->InAt(0);
+ CpuRegister obj = obj_loc.AsRegister<CpuRegister>();
Location cls = locations->InAt(1);
- CpuRegister temp = locations->WillCall()
- ? CpuRegister(kNoRegister)
- : locations->GetTemp(0).AsRegister<CpuRegister>();
-
+ Location temp_loc = locations->GetTemp(0);
+ CpuRegister temp = temp_loc.AsRegister<CpuRegister>();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
- SlowPathCode* slow_path = nullptr;
- if (!locations->WillCall()) {
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(
- instruction, !locations->CanCall());
- codegen_->AddSlowPath(slow_path);
- }
+ TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
+ bool is_type_check_slow_path_fatal =
+ (type_check_kind == TypeCheckKind::kExactCheck ||
+ type_check_kind == TypeCheckKind::kAbstractClassCheck ||
+ type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
+ type_check_kind == TypeCheckKind::kArrayObjectCheck) &&
+ !instruction->CanThrowIntoCatchBlock();
+ SlowPathCode* type_check_slow_path =
+ new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(instruction,
+ is_type_check_slow_path_fatal);
+ codegen_->AddSlowPath(type_check_slow_path);
NearLabel done;
// Avoid null check if we know obj is not null.
@@ -5179,15 +5582,11 @@ void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) {
__ j(kEqual, &done);
}
- if (locations->WillCall()) {
- __ movl(obj, Address(obj, class_offset));
- __ MaybeUnpoisonHeapReference(obj);
- } else {
- __ movl(temp, Address(obj, class_offset));
- __ MaybeUnpoisonHeapReference(temp);
- }
+ // /* HeapReference<Class> */ temp = obj->klass_
+ __ movl(temp, Address(obj, class_offset));
+ codegen_->MaybeGenerateReadBarrier(instruction, temp_loc, temp_loc, obj_loc, class_offset);
- switch (instruction->GetTypeCheckKind()) {
+ switch (type_check_kind) {
case TypeCheckKind::kExactCheck:
case TypeCheckKind::kArrayCheck: {
if (cls.IsRegister()) {
@@ -5198,19 +5597,44 @@ void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) {
}
// Jump to slow path for throwing the exception or doing a
// more involved array check.
- __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ j(kNotEqual, type_check_slow_path->GetEntryLabel());
break;
}
+
case TypeCheckKind::kAbstractClassCheck: {
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
- NearLabel loop;
+ NearLabel loop, compare_classes;
__ Bind(&loop);
+ Location temp2_loc =
+ kEmitCompilerReadBarrier ? locations->GetTemp(1) : Location::NoLocation();
+ if (kEmitCompilerReadBarrier) {
+ // Save the value of `temp` into `temp2` before overwriting it
+ // in the following move operation, as we will need it for the
+ // read barrier below.
+ CpuRegister temp2 = temp2_loc.AsRegister<CpuRegister>();
+ __ movl(temp2, temp);
+ }
+ // /* HeapReference<Class> */ temp = temp->super_class_
__ movl(temp, Address(temp, super_offset));
- __ MaybeUnpoisonHeapReference(temp);
+ codegen_->MaybeGenerateReadBarrier(instruction, temp_loc, temp_loc, temp2_loc, super_offset);
+
+ // If the class reference currently in `temp` is not null, jump
+ // to the `compare_classes` label to compare it with the checked
+ // class.
__ testl(temp, temp);
- // Jump to the slow path to throw the exception.
- __ j(kEqual, slow_path->GetEntryLabel());
+ __ j(kNotEqual, &compare_classes);
+ // Otherwise, jump to the slow path to throw the exception.
+ //
+ // But before, move back the object's class into `temp` before
+ // going into the slow path, as it has been overwritten in the
+ // meantime.
+ // /* HeapReference<Class> */ temp = obj->klass_
+ __ movl(temp, Address(obj, class_offset));
+ codegen_->MaybeGenerateReadBarrier(instruction, temp_loc, temp_loc, obj_loc, class_offset);
+ __ jmp(type_check_slow_path->GetEntryLabel());
+
+ __ Bind(&compare_classes);
if (cls.IsRegister()) {
__ cmpl(temp, cls.AsRegister<CpuRegister>());
} else {
@@ -5220,6 +5644,7 @@ void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) {
__ j(kNotEqual, &loop);
break;
}
+
case TypeCheckKind::kClassHierarchyCheck: {
// Walk over the class hierarchy to find a match.
NearLabel loop;
@@ -5231,16 +5656,39 @@ void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) {
__ cmpl(temp, Address(CpuRegister(RSP), cls.GetStackIndex()));
}
__ j(kEqual, &done);
+
+ Location temp2_loc =
+ kEmitCompilerReadBarrier ? locations->GetTemp(1) : Location::NoLocation();
+ if (kEmitCompilerReadBarrier) {
+ // Save the value of `temp` into `temp2` before overwriting it
+ // in the following move operation, as we will need it for the
+ // read barrier below.
+ CpuRegister temp2 = temp2_loc.AsRegister<CpuRegister>();
+ __ movl(temp2, temp);
+ }
+ // /* HeapReference<Class> */ temp = temp->super_class_
__ movl(temp, Address(temp, super_offset));
- __ MaybeUnpoisonHeapReference(temp);
+ codegen_->MaybeGenerateReadBarrier(instruction, temp_loc, temp_loc, temp2_loc, super_offset);
+
+ // If the class reference currently in `temp` is not null, jump
+      // back to the beginning of the loop.
__ testl(temp, temp);
__ j(kNotEqual, &loop);
- // Jump to the slow path to throw the exception.
- __ jmp(slow_path->GetEntryLabel());
+ // Otherwise, jump to the slow path to throw the exception.
+ //
+ // But before, move back the object's class into `temp` before
+ // going into the slow path, as it has been overwritten in the
+ // meantime.
+ // /* HeapReference<Class> */ temp = obj->klass_
+ __ movl(temp, Address(obj, class_offset));
+ codegen_->MaybeGenerateReadBarrier(instruction, temp_loc, temp_loc, obj_loc, class_offset);
+ __ jmp(type_check_slow_path->GetEntryLabel());
break;
}
+
case TypeCheckKind::kArrayObjectCheck: {
// Do an exact check.
+ NearLabel check_non_primitive_component_type;
if (cls.IsRegister()) {
__ cmpl(temp, cls.AsRegister<CpuRegister>());
} else {
@@ -5248,29 +5696,67 @@ void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) {
__ cmpl(temp, Address(CpuRegister(RSP), cls.GetStackIndex()));
}
__ j(kEqual, &done);
- // Otherwise, we need to check that the object's class is a non primitive array.
+
+ // Otherwise, we need to check that the object's class is a non-primitive array.
+ Location temp2_loc =
+ kEmitCompilerReadBarrier ? locations->GetTemp(1) : Location::NoLocation();
+ if (kEmitCompilerReadBarrier) {
+ // Save the value of `temp` into `temp2` before overwriting it
+ // in the following move operation, as we will need it for the
+ // read barrier below.
+ CpuRegister temp2 = temp2_loc.AsRegister<CpuRegister>();
+ __ movl(temp2, temp);
+ }
+ // /* HeapReference<Class> */ temp = temp->component_type_
__ movl(temp, Address(temp, component_offset));
- __ MaybeUnpoisonHeapReference(temp);
+ codegen_->MaybeGenerateReadBarrier(
+ instruction, temp_loc, temp_loc, temp2_loc, component_offset);
+
+ // If the component type is not null (i.e. the object is indeed
+ // an array), jump to label `check_non_primitive_component_type`
+ // to further check that this component type is not a primitive
+ // type.
__ testl(temp, temp);
- __ j(kEqual, slow_path->GetEntryLabel());
+ __ j(kNotEqual, &check_non_primitive_component_type);
+ // Otherwise, jump to the slow path to throw the exception.
+ //
+ // But before, move back the object's class into `temp` before
+ // going into the slow path, as it has been overwritten in the
+ // meantime.
+ // /* HeapReference<Class> */ temp = obj->klass_
+ __ movl(temp, Address(obj, class_offset));
+ codegen_->MaybeGenerateReadBarrier(instruction, temp_loc, temp_loc, obj_loc, class_offset);
+ __ jmp(type_check_slow_path->GetEntryLabel());
+
+ __ Bind(&check_non_primitive_component_type);
__ cmpw(Address(temp, primitive_offset), Immediate(Primitive::kPrimNot));
- __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ j(kEqual, &done);
+ // Same comment as above regarding `temp` and the slow path.
+ // /* HeapReference<Class> */ temp = obj->klass_
+ __ movl(temp, Address(obj, class_offset));
+ codegen_->MaybeGenerateReadBarrier(instruction, temp_loc, temp_loc, obj_loc, class_offset);
+ __ jmp(type_check_slow_path->GetEntryLabel());
break;
}
+
case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
- default:
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
- instruction,
- instruction->GetDexPc(),
- nullptr);
+ // We always go into the type check slow path for the unresolved &
+ // interface check cases.
+ //
+ // We cannot directly call the CheckCast runtime entry point
+ // without resorting to a type checking slow path here (i.e. by
+    // calling InvokeRuntime directly), as it would require
+    // assigning fixed registers for the inputs of this HCheckCast
+    // instruction (following the runtime calling convention), which
+    // might be clobbered by the potential first read barrier
+ // emission at the beginning of this method.
+ __ jmp(type_check_slow_path->GetEntryLabel());
break;
}
__ Bind(&done);
- if (slow_path != nullptr) {
- __ Bind(slow_path->GetExitLabel());
- }
+ __ Bind(type_check_slow_path->GetExitLabel());
}
void LocationsBuilderX86_64::VisitMonitorOperation(HMonitorOperation* instruction) {
@@ -5403,6 +5889,82 @@ void InstructionCodeGeneratorX86_64::HandleBitwiseOperation(HBinaryOperation* in
}
}
+void CodeGeneratorX86_64::GenerateReadBarrier(HInstruction* instruction,
+ Location out,
+ Location ref,
+ Location obj,
+ uint32_t offset,
+ Location index) {
+ DCHECK(kEmitCompilerReadBarrier);
+
+ // If heap poisoning is enabled, the unpoisoning of the loaded
+ // reference will be carried out by the runtime within the slow
+ // path.
+ //
+ // Note that `ref` currently does not get unpoisoned (when heap
+ // poisoning is enabled), which is alright as the `ref` argument is
+ // not used by the artReadBarrierSlow entry point.
+ //
+ // TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
+ SlowPathCode* slow_path = new (GetGraph()->GetArena())
+ ReadBarrierForHeapReferenceSlowPathX86_64(instruction, out, ref, obj, offset, index);
+ AddSlowPath(slow_path);
+
+ // TODO: When read barrier has a fast path, add it here.
+ /* Currently the read barrier call is inserted after the original load.
+ * However, if we have a fast path, we need to perform the load of obj.LockWord *before* the
+ * original load. This load-load ordering is required by the read barrier.
+ * The fast path/slow path (for Baker's algorithm) should look like:
+ *
+ * bool isGray = obj.LockWord & kReadBarrierMask;
+ * lfence; // load fence or artificial data dependence to prevent load-load reordering
+ * ref = obj.field; // this is the original load
+ * if (isGray) {
+ * ref = Mark(ref); // ideally the slow path just does Mark(ref)
+ * }
+ */
+
+ __ jmp(slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+}
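As a standalone sketch of the Baker-style fast path described in the TODO above (all types and helpers below are invented for illustration; this is neither ART code nor what the patch currently emits), the control flow would be:

    #include <atomic>
    #include <cstdint>

    struct Obj { uint32_t lock_word; Obj* field; };            // toy stand-in for mirror::Object
    constexpr uint32_t kReadBarrierMask = 0x10000000u;         // illustrative gray bit only
    Obj* Mark(Obj* ref) { return ref; }                        // stand-in for the collector's Mark()

    // Read the lock word *before* the reference, fence against load-load
    // reordering, then take the slow path only when the object is gray.
    Obj* BakerReadBarrier(Obj* obj) {
      bool is_gray = (obj->lock_word & kReadBarrierMask) != 0;
      std::atomic_thread_fence(std::memory_order_acquire);     // the "lfence" in the comment
      Obj* ref = obj->field;                                   // the original load
      if (is_gray) {
        ref = Mark(ref);                                       // ideally just Mark(ref)
      }
      return ref;
    }

For now, GenerateReadBarrier skips any such check and unconditionally jumps to ReadBarrierForHeapReferenceSlowPathX86_64, which ends up calling artReadBarrierSlow as documented in the header change below.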
+
+void CodeGeneratorX86_64::MaybeGenerateReadBarrier(HInstruction* instruction,
+ Location out,
+ Location ref,
+ Location obj,
+ uint32_t offset,
+ Location index) {
+ if (kEmitCompilerReadBarrier) {
+ // If heap poisoning is enabled, unpoisoning will be taken care of
+ // by the runtime within the slow path.
+ GenerateReadBarrier(instruction, out, ref, obj, offset, index);
+ } else if (kPoisonHeapReferences) {
+ __ UnpoisonHeapReference(out.AsRegister<CpuRegister>());
+ }
+}
+
+void CodeGeneratorX86_64::GenerateReadBarrierForRoot(HInstruction* instruction,
+ Location out,
+ Location root) {
+ DCHECK(kEmitCompilerReadBarrier);
+
+ // Note that GC roots are not affected by heap poisoning, so we do
+ // not need to do anything special for this here.
+ SlowPathCode* slow_path =
+ new (GetGraph()->GetArena()) ReadBarrierForRootSlowPathX86_64(instruction, out, root);
+ AddSlowPath(slow_path);
+
+ // TODO: Implement a fast path for ReadBarrierForRoot, performing
+ // the following operation (for Baker's algorithm):
+ //
+ // if (thread.tls32_.is_gc_marking) {
+ // root = Mark(root);
+ // }
+
+ __ jmp(slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+}
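Similarly, a hedged, self-contained illustration of the root fast path from the TODO above (toy types, not ART code):

    struct Root { void* ref; };                                          // toy stand-in for GcRoot<mirror::Object>
    void* ReadBarrierForRootSlowStub(Root* root) { return root->ref; }   // stands in for artReadBarrierForRootSlow

    // Only take the slow path while the GC is marking (thread.tls32_.is_gc_marking).
    void* ReadRoot(Root* root, bool is_gc_marking) {
      return is_gc_marking ? ReadBarrierForRootSlowStub(root)  // root = Mark(root)
                           : root->ref;                        // plain read otherwise
    }

Until that fast path exists, the generated code above always branches to ReadBarrierForRootSlowPathX86_64.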
+
void LocationsBuilderX86_64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
// Nothing to do, this should be removed during prepare for register allocator.
LOG(FATAL) << "Unreachable";
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 7a52473408..5791fcd0e6 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -352,6 +352,51 @@ class CodeGeneratorX86_64 : public CodeGenerator {
return isa_features_;
}
+ // Generate a read barrier for a heap reference within `instruction`.
+ //
+ // A read barrier for an object reference read from the heap is
+ // implemented as a call to the artReadBarrierSlow runtime entry
+ // point, which is passed the values in locations `ref`, `obj`, and
+ // `offset`:
+ //
+ // mirror::Object* artReadBarrierSlow(mirror::Object* ref,
+ // mirror::Object* obj,
+ // uint32_t offset);
+ //
+ // The `out` location contains the value returned by
+ // artReadBarrierSlow.
+ //
+  // When `index` is provided (i.e., when it is different from
+ // Location::NoLocation()), the offset value passed to
+ // artReadBarrierSlow is adjusted to take `index` into account.
+ void GenerateReadBarrier(HInstruction* instruction,
+ Location out,
+ Location ref,
+ Location obj,
+ uint32_t offset,
+ Location index = Location::NoLocation());
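To make the `index` adjustment above concrete, here is a rough, hedged model of the arithmetic; the authoritative computation lives in ReadBarrierForHeapReferenceSlowPathX86_64, and kHeapReferenceSize plus the sample offsets are assumptions for illustration:

    #include <cstdint>

    // With 4-byte compressed heap references, element `index` of an array whose
    // payload starts at `data_offset` lives at data_offset + 4 * index; an offset
    // of that shape is what ultimately reaches artReadBarrierSlow.
    constexpr uint32_t kHeapReferenceSize = 4;  // assumed sizeof(mirror::HeapReference<mirror::Object>)
    constexpr uint32_t AdjustedOffset(uint32_t data_offset, uint32_t index) {
      return data_offset + index * kHeapReferenceSize;
    }
    static_assert(AdjustedOffset(12, 3) == 24, "purely arithmetic sanity check");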
+
+ // If read barriers are enabled, generate a read barrier for a heap reference.
+  // Otherwise, if heap poisoning is enabled, unpoison the reference in `out`.
+ void MaybeGenerateReadBarrier(HInstruction* instruction,
+ Location out,
+ Location ref,
+ Location obj,
+ uint32_t offset,
+ Location index = Location::NoLocation());
+
+ // Generate a read barrier for a GC root within `instruction`.
+ //
+ // A read barrier for an object reference GC root is implemented as
+ // a call to the artReadBarrierForRootSlow runtime entry point,
+ // which is passed the value in location `root`:
+ //
+ // mirror::Object* artReadBarrierForRootSlow(GcRoot<mirror::Object>* root);
+ //
+ // The `out` location contains the value returned by
+ // artReadBarrierForRootSlow.
+ void GenerateReadBarrierForRoot(HInstruction* instruction, Location out, Location root);
+
int ConstantAreaStart() const {
return constant_area_start_;
}
@@ -401,7 +446,7 @@ class CodeGeneratorX86_64 : public CodeGenerator {
ArenaDeque<MethodPatchInfo<Label>> method_patches_;
ArenaDeque<MethodPatchInfo<Label>> relative_call_patches_;
// PC-relative DexCache access info.
- ArenaDeque<PcRelativeDexCacheAccessInfo> pc_rel_dex_cache_patches_;
+ ArenaDeque<PcRelativeDexCacheAccessInfo> pc_relative_dex_cache_patches_;
// When we don't know the proper offset for the value, we use kDummy32BitOffset.
// We will fix this up in the linker later to have the right value.
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index df2013bef4..0363f203b2 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -148,7 +148,7 @@ static ArtMethod* FindVirtualOrInterfaceTarget(HInvoke* invoke, ArtMethod* resol
// the target method. Since we check above the exact type of the receiver,
// the only reason this can happen is an IncompatibleClassChangeError.
return nullptr;
- } else if (resolved_method->IsAbstract()) {
+ } else if (!resolved_method->IsInvokable()) {
// The information we had on the receiver was not enough to find
// the target method. Since we check above the exact type of the receiver,
// the only reason this can happen is an IncompatibleClassChangeError.
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 040bf6a45e..371588fc47 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -55,7 +55,23 @@ ArenaAllocator* IntrinsicCodeGeneratorX86::GetAllocator() {
bool IntrinsicLocationsBuilderX86::TryDispatch(HInvoke* invoke) {
Dispatch(invoke);
LocationSummary* res = invoke->GetLocations();
- return res != nullptr && res->Intrinsified();
+ if (res == nullptr) {
+ return false;
+ }
+ if (kEmitCompilerReadBarrier && res->CanCall()) {
+ // Generating an intrinsic for this HInvoke may produce an
+ // IntrinsicSlowPathX86 slow path. Currently this approach
+ // does not work when using read barriers, as the emitted
+ // calling sequence will make use of another slow path
+ // (ReadBarrierForRootSlowPathX86 for HInvokeStaticOrDirect,
+ // ReadBarrierSlowPathX86 for HInvokeVirtual). So we bail
+ // out in this case.
+ //
+ // TODO: Find a way to have intrinsics work with read barriers.
+ invoke->SetLocations(nullptr);
+ return false;
+ }
+ return res->Intrinsified();
}
static void MoveArguments(HInvoke* invoke, CodeGeneratorX86* codegen) {
@@ -1571,26 +1587,32 @@ void IntrinsicCodeGeneratorX86::VisitThreadCurrentThread(HInvoke* invoke) {
GetAssembler()->fs()->movl(out, Address::Absolute(Thread::PeerOffset<kX86WordSize>()));
}
-static void GenUnsafeGet(LocationSummary* locations, Primitive::Type type,
- bool is_volatile, X86Assembler* assembler) {
- Register base = locations->InAt(1).AsRegister<Register>();
- Register offset = locations->InAt(2).AsRegisterPairLow<Register>();
- Location output = locations->Out();
+static void GenUnsafeGet(HInvoke* invoke,
+ Primitive::Type type,
+ bool is_volatile,
+ CodeGeneratorX86* codegen) {
+ X86Assembler* assembler = down_cast<X86Assembler*>(codegen->GetAssembler());
+ LocationSummary* locations = invoke->GetLocations();
+ Location base_loc = locations->InAt(1);
+ Register base = base_loc.AsRegister<Register>();
+ Location offset_loc = locations->InAt(2);
+ Register offset = offset_loc.AsRegisterPairLow<Register>();
+ Location output_loc = locations->Out();
switch (type) {
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
- Register output_reg = output.AsRegister<Register>();
- __ movl(output_reg, Address(base, offset, ScaleFactor::TIMES_1, 0));
+ Register output = output_loc.AsRegister<Register>();
+ __ movl(output, Address(base, offset, ScaleFactor::TIMES_1, 0));
if (type == Primitive::kPrimNot) {
- __ MaybeUnpoisonHeapReference(output_reg);
+ codegen->MaybeGenerateReadBarrier(invoke, output_loc, output_loc, base_loc, 0U, offset_loc);
}
break;
}
case Primitive::kPrimLong: {
- Register output_lo = output.AsRegisterPairLow<Register>();
- Register output_hi = output.AsRegisterPairHigh<Register>();
+ Register output_lo = output_loc.AsRegisterPairLow<Register>();
+ Register output_hi = output_loc.AsRegisterPairHigh<Register>();
if (is_volatile) {
// Need to use a XMM to read atomically.
XmmRegister temp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
@@ -1613,8 +1635,13 @@ static void GenUnsafeGet(LocationSummary* locations, Primitive::Type type,
static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke,
bool is_long, bool is_volatile) {
+ bool can_call = kEmitCompilerReadBarrier &&
+ (invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject ||
+ invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile);
LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
+ can_call ?
+ LocationSummary::kCallOnSlowPath :
+ LocationSummary::kNoCall,
kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
@@ -1653,22 +1680,22 @@ void IntrinsicLocationsBuilderX86::VisitUnsafeGetObjectVolatile(HInvoke* invoke)
void IntrinsicCodeGeneratorX86::VisitUnsafeGet(HInvoke* invoke) {
- GenUnsafeGet(invoke->GetLocations(), Primitive::kPrimInt, false, GetAssembler());
+ GenUnsafeGet(invoke, Primitive::kPrimInt, false, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafeGetVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke->GetLocations(), Primitive::kPrimInt, true, GetAssembler());
+ GenUnsafeGet(invoke, Primitive::kPrimInt, true, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafeGetLong(HInvoke* invoke) {
- GenUnsafeGet(invoke->GetLocations(), Primitive::kPrimLong, false, GetAssembler());
+ GenUnsafeGet(invoke, Primitive::kPrimLong, false, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke->GetLocations(), Primitive::kPrimLong, true, GetAssembler());
+ GenUnsafeGet(invoke, Primitive::kPrimLong, true, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafeGetObject(HInvoke* invoke) {
- GenUnsafeGet(invoke->GetLocations(), Primitive::kPrimNot, false, GetAssembler());
+ GenUnsafeGet(invoke, Primitive::kPrimNot, false, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke->GetLocations(), Primitive::kPrimNot, true, GetAssembler());
+ GenUnsafeGet(invoke, Primitive::kPrimNot, true, codegen_);
}
@@ -1890,13 +1917,18 @@ static void GenCAS(Primitive::Type type, HInvoke* invoke, CodeGeneratorX86* code
__ LockCmpxchgl(Address(base, offset, TIMES_1, 0), value);
- // locked cmpxchg has full barrier semantics, and we don't need
+ // LOCK CMPXCHG has full barrier semantics, and we don't need
// scheduling barriers at this time.
// Convert ZF into the boolean result.
__ setb(kZero, out.AsRegister<Register>());
__ movzxb(out.AsRegister<Register>(), out.AsRegister<ByteRegister>());
+ // In the case of the `UnsafeCASObject` intrinsic, accessing an
+ // object in the heap with LOCK CMPXCHG does not require a read
+ // barrier, as we do not keep a reference to this heap location.
+ // However, if heap poisoning is enabled, we need to unpoison the
+ // values that were poisoned earlier.
if (kPoisonHeapReferences) {
if (base_equals_value) {
// `value` has been moved to a temporary register, no need to
@@ -1929,8 +1961,8 @@ static void GenCAS(Primitive::Type type, HInvoke* invoke, CodeGeneratorX86* code
LOG(FATAL) << "Unexpected CAS type " << type;
}
- // locked cmpxchg has full barrier semantics, and we don't need
- // scheduling barriers at this time.
+ // LOCK CMPXCHG/LOCK CMPXCHG8B have full barrier semantics, and we
+ // don't need scheduling barriers at this time.
// Convert ZF into the boolean result.
__ setb(kZero, out.AsRegister<Register>());
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index a29f3ef1d1..2d9f01b821 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -50,8 +50,24 @@ ArenaAllocator* IntrinsicCodeGeneratorX86_64::GetAllocator() {
bool IntrinsicLocationsBuilderX86_64::TryDispatch(HInvoke* invoke) {
Dispatch(invoke);
- const LocationSummary* res = invoke->GetLocations();
- return res != nullptr && res->Intrinsified();
+ LocationSummary* res = invoke->GetLocations();
+ if (res == nullptr) {
+ return false;
+ }
+ if (kEmitCompilerReadBarrier && res->CanCall()) {
+ // Generating an intrinsic for this HInvoke may produce an
+ // IntrinsicSlowPathX86_64 slow path. Currently this approach
+ // does not work when using read barriers, as the emitted
+ // calling sequence will make use of another slow path
+ // (ReadBarrierForRootSlowPathX86_64 for HInvokeStaticOrDirect,
+ // ReadBarrierSlowPathX86_64 for HInvokeVirtual). So we bail
+ // out in this case.
+ //
+ // TODO: Find a way to have intrinsics work with read barriers.
+ invoke->SetLocations(nullptr);
+ return false;
+ }
+ return res->Intrinsified();
}
static void MoveArguments(HInvoke* invoke, CodeGeneratorX86_64* codegen) {
@@ -917,6 +933,10 @@ void IntrinsicLocationsBuilderX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
CodeGenerator::CreateSystemArrayCopyLocationSummary(invoke);
}
+// TODO: Implement read barriers in the SystemArrayCopy intrinsic.
+// Note that this code path is not used (yet) because we do not
+// intrinsify methods that can go into the IntrinsicSlowPathX86_64
+// slow path.
void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
X86_64Assembler* assembler = GetAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -1698,23 +1718,30 @@ void IntrinsicCodeGeneratorX86_64::VisitThreadCurrentThread(HInvoke* invoke) {
GetAssembler()->gs()->movl(out, Address::Absolute(Thread::PeerOffset<kX86_64WordSize>(), true));
}
-static void GenUnsafeGet(LocationSummary* locations, Primitive::Type type,
- bool is_volatile ATTRIBUTE_UNUSED, X86_64Assembler* assembler) {
- CpuRegister base = locations->InAt(1).AsRegister<CpuRegister>();
- CpuRegister offset = locations->InAt(2).AsRegister<CpuRegister>();
- CpuRegister trg = locations->Out().AsRegister<CpuRegister>();
+static void GenUnsafeGet(HInvoke* invoke,
+ Primitive::Type type,
+ bool is_volatile ATTRIBUTE_UNUSED,
+ CodeGeneratorX86_64* codegen) {
+ X86_64Assembler* assembler = down_cast<X86_64Assembler*>(codegen->GetAssembler());
+ LocationSummary* locations = invoke->GetLocations();
+ Location base_loc = locations->InAt(1);
+ CpuRegister base = base_loc.AsRegister<CpuRegister>();
+ Location offset_loc = locations->InAt(2);
+ CpuRegister offset = offset_loc.AsRegister<CpuRegister>();
+ Location output_loc = locations->Out();
+  CpuRegister output = output_loc.AsRegister<CpuRegister>();
switch (type) {
case Primitive::kPrimInt:
case Primitive::kPrimNot:
- __ movl(trg, Address(base, offset, ScaleFactor::TIMES_1, 0));
+ __ movl(output, Address(base, offset, ScaleFactor::TIMES_1, 0));
if (type == Primitive::kPrimNot) {
- __ MaybeUnpoisonHeapReference(trg);
+ codegen->MaybeGenerateReadBarrier(invoke, output_loc, output_loc, base_loc, 0U, offset_loc);
}
break;
case Primitive::kPrimLong:
- __ movq(trg, Address(base, offset, ScaleFactor::TIMES_1, 0));
+ __ movq(output, Address(base, offset, ScaleFactor::TIMES_1, 0));
break;
default:
@@ -1724,8 +1751,13 @@ static void GenUnsafeGet(LocationSummary* locations, Primitive::Type type,
}
static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
+ bool can_call = kEmitCompilerReadBarrier &&
+ (invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject ||
+ invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile);
LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
+ can_call ?
+ LocationSummary::kCallOnSlowPath :
+ LocationSummary::kNoCall,
kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
@@ -1754,22 +1786,22 @@ void IntrinsicLocationsBuilderX86_64::VisitUnsafeGetObjectVolatile(HInvoke* invo
void IntrinsicCodeGeneratorX86_64::VisitUnsafeGet(HInvoke* invoke) {
- GenUnsafeGet(invoke->GetLocations(), Primitive::kPrimInt, false, GetAssembler());
+ GenUnsafeGet(invoke, Primitive::kPrimInt, false, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke->GetLocations(), Primitive::kPrimInt, true, GetAssembler());
+ GenUnsafeGet(invoke, Primitive::kPrimInt, true, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetLong(HInvoke* invoke) {
- GenUnsafeGet(invoke->GetLocations(), Primitive::kPrimLong, false, GetAssembler());
+ GenUnsafeGet(invoke, Primitive::kPrimLong, false, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke->GetLocations(), Primitive::kPrimLong, true, GetAssembler());
+ GenUnsafeGet(invoke, Primitive::kPrimLong, true, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetObject(HInvoke* invoke) {
- GenUnsafeGet(invoke->GetLocations(), Primitive::kPrimNot, false, GetAssembler());
+ GenUnsafeGet(invoke, Primitive::kPrimNot, false, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke->GetLocations(), Primitive::kPrimNot, true, GetAssembler());
+ GenUnsafeGet(invoke, Primitive::kPrimNot, true, codegen_);
}
@@ -1961,13 +1993,18 @@ static void GenCAS(Primitive::Type type, HInvoke* invoke, CodeGeneratorX86_64* c
__ LockCmpxchgl(Address(base, offset, TIMES_1, 0), CpuRegister(value_reg));
- // locked cmpxchg has full barrier semantics, and we don't need
+ // LOCK CMPXCHG has full barrier semantics, and we don't need
// scheduling barriers at this time.
// Convert ZF into the boolean result.
__ setcc(kZero, out);
__ movzxb(out, out);
+ // In the case of the `UnsafeCASObject` intrinsic, accessing an
+ // object in the heap with LOCK CMPXCHG does not require a read
+ // barrier, as we do not keep a reference to this heap location.
+ // However, if heap poisoning is enabled, we need to unpoison the
+ // values that were poisoned earlier.
if (kPoisonHeapReferences) {
if (base_equals_value) {
// `value_reg` has been moved to a temporary register, no need
@@ -1992,7 +2029,7 @@ static void GenCAS(Primitive::Type type, HInvoke* invoke, CodeGeneratorX86_64* c
LOG(FATAL) << "Unexpected CAS type " << type;
}
- // locked cmpxchg has full barrier semantics, and we don't need
+ // LOCK CMPXCHG has full barrier semantics, and we don't need
// scheduling barriers at this time.
// Convert ZF into the boolean result.
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index 1181007666..63bbc2cd0a 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -594,6 +594,10 @@ class LocationSummary : public ArenaObject<kArenaAllocLocationSummary> {
return intrinsified_;
}
+ void SetIntrinsified(bool intrinsified) {
+ intrinsified_ = intrinsified;
+ }
+
private:
ArenaVector<Location> inputs_;
ArenaVector<Location> temps_;
@@ -613,7 +617,7 @@ class LocationSummary : public ArenaObject<kArenaAllocLocationSummary> {
RegisterSet live_registers_;
// Whether these are locations for an intrinsified call.
- const bool intrinsified_;
+ bool intrinsified_;
ART_FRIEND_TEST(RegisterAllocatorTest, ExpectedInRegisterHint);
ART_FRIEND_TEST(RegisterAllocatorTest, SameAsFirstInputHint);
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 716888a269..73a44ee2cb 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -2048,6 +2048,16 @@ bool HInvokeStaticOrDirect::NeedsDexCacheOfDeclaringClass() const {
return !opt.GetDoesNotNeedDexCache();
}
+void HInvokeStaticOrDirect::InsertInputAt(size_t index, HInstruction* input) {
+ inputs_.insert(inputs_.begin() + index, HUserRecord<HInstruction*>(input));
+ input->AddUseAt(this, index);
+ // Update indexes in use nodes of inputs that have been pushed further back by the insert().
+ for (size_t i = index + 1u, size = inputs_.size(); i != size; ++i) {
+ DCHECK_EQ(InputRecordAt(i).GetUseNode()->GetIndex(), i - 1u);
+ InputRecordAt(i).GetUseNode()->SetIndex(i);
+ }
+}
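A small worked example of the index fix-up in InsertInputAt above, modelled with a plain vector rather than ART's use-node types: starting from inputs [a, b, c] whose use nodes record slots 0, 1, 2, inserting x at slot 1 shifts b and c right, so the loop rewrites their recorded slots from 1 and 2 to 2 and 3.

    #include <cassert>
    #include <vector>

    int main() {
      std::vector<int> recorded_slot = {0, 1, 2};       // uses of inputs a, b, c
      const size_t index = 1;                           // InsertInputAt(1, x)
      recorded_slot.insert(recorded_slot.begin() + index, static_cast<int>(index));
      for (size_t i = index + 1; i != recorded_slot.size(); ++i) {
        assert(recorded_slot[i] == static_cast<int>(i - 1));  // the DCHECK in the loop
        recorded_slot[i] = static_cast<int>(i);               // GetUseNode()->SetIndex(i)
      }
      assert((recorded_slot == std::vector<int>{0, 1, 2, 3}));
      return 0;
    }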
+
void HInvokeStaticOrDirect::RemoveInputAt(size_t index) {
RemoveAsUserOfInput(index);
inputs_.erase(inputs_.begin() + index);
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 79b79d7a0d..1da2a1dfd0 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1436,7 +1436,7 @@ class SideEffects : public ValueObject {
return flags_ == (kAllChangeBits | kAllDependOnBits);
}
- // Returns true if this may read something written by other.
+ // Returns true if `this` may read something written by `other`.
bool MayDependOn(SideEffects other) const {
const uint64_t depends_on_flags = (flags_ & kAllDependOnBits) >> kChangeBits;
return (other.flags_ & depends_on_flags);
@@ -3440,6 +3440,7 @@ class HInvokeStaticOrDirect : public HInvoke {
dispatch_info_ = dispatch_info;
}
+ void InsertInputAt(size_t index, HInstruction* input);
void RemoveInputAt(size_t index);
bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const OVERRIDE {
@@ -3460,7 +3461,7 @@ class HInvokeStaticOrDirect : public HInvoke {
bool IsStringInit() const { return GetMethodLoadKind() == MethodLoadKind::kStringInit; }
uint32_t GetCurrentMethodInputIndex() const { return GetNumberOfArguments(); }
bool HasMethodAddress() const { return GetMethodLoadKind() == MethodLoadKind::kDirectAddress; }
- bool HasPcRelDexCache() const {
+ bool HasPcRelativeDexCache() const {
return GetMethodLoadKind() == MethodLoadKind::kDexCachePcRelative;
}
bool HasCurrentMethodInput() const {
@@ -3488,7 +3489,7 @@ class HInvokeStaticOrDirect : public HInvoke {
}
uint32_t GetDexCacheArrayOffset() const {
- DCHECK(HasPcRelDexCache());
+ DCHECK(HasPcRelativeDexCache());
return dispatch_info_.method_load_data;
}
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 368a30e4b1..27ee47296c 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -24,7 +24,7 @@
#endif
#ifdef ART_ENABLE_CODEGEN_x86
-#include "constant_area_fixups_x86.h"
+#include "pc_relative_fixups_x86.h"
#endif
#include "art_method-inl.h"
@@ -383,6 +383,13 @@ static bool IsInstructionSetSupported(InstructionSet instruction_set) {
|| instruction_set == kX86_64;
}
+// Read barriers are supported only on x86 and x86-64 at the moment.
+// TODO: Add support for other architectures and remove this function.
+static bool InstructionSetSupportsReadBarrier(InstructionSet instruction_set) {
+ return instruction_set == kX86
+ || instruction_set == kX86_64;
+}
+
static void RunOptimizations(HOptimization* optimizations[],
size_t length,
PassObserver* pass_observer) {
@@ -435,10 +442,9 @@ static void RunArchOptimizations(InstructionSet instruction_set,
#endif
#ifdef ART_ENABLE_CODEGEN_x86
case kX86: {
- x86::ConstantAreaFixups* constant_area_fixups =
- new (arena) x86::ConstantAreaFixups(graph, stats);
+ x86::PcRelativeFixups* pc_relative_fixups = new (arena) x86::PcRelativeFixups(graph, stats);
HOptimization* x86_optimizations[] = {
- constant_area_fixups
+ pc_relative_fixups
};
RunOptimizations(x86_optimizations, arraysize(x86_optimizations), pass_observer);
break;
@@ -674,6 +680,12 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena,
return nullptr;
}
+ // When read barriers are enabled, do not attempt to compile for
+ // instruction sets that have no read barrier support.
+ if (kEmitCompilerReadBarrier && !InstructionSetSupportsReadBarrier(instruction_set)) {
+ return nullptr;
+ }
+
if (Compiler::IsPathologicalCase(*code_item, method_idx, dex_file)) {
MaybeRecordStat(MethodCompilationStat::kNotCompiledPathological);
return nullptr;
@@ -842,9 +854,14 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
if (kIsDebugBuild &&
IsCompilingWithCoreImage() &&
- IsInstructionSetSupported(compiler_driver->GetInstructionSet())) {
- // For testing purposes, we put a special marker on method names that should be compiled
- // with this compiler. This makes sure we're not regressing.
+ IsInstructionSetSupported(compiler_driver->GetInstructionSet()) &&
+ (!kEmitCompilerReadBarrier ||
+ InstructionSetSupportsReadBarrier(compiler_driver->GetInstructionSet()))) {
+ // For testing purposes, we put a special marker on method names
+      // that should be compiled with this compiler (when the
+ // instruction set is supported -- and has support for read
+ // barriers, if they are enabled). This makes sure we're not
+ // regressing.
std::string method_name = PrettyMethod(method_idx, dex_file);
bool shouldCompile = method_name.find("$opt$") != std::string::npos;
DCHECK((method != nullptr) || !shouldCompile) << "Didn't compile " << method_name;
diff --git a/compiler/optimizing/constant_area_fixups_x86.cc b/compiler/optimizing/pc_relative_fixups_x86.cc
index c3470002c5..c2894c7338 100644
--- a/compiler/optimizing/constant_area_fixups_x86.cc
+++ b/compiler/optimizing/pc_relative_fixups_x86.cc
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include "constant_area_fixups_x86.h"
+#include "pc_relative_fixups_x86.h"
namespace art {
namespace x86 {
@@ -22,9 +22,9 @@ namespace x86 {
/**
* Finds instructions that need the constant area base as an input.
*/
-class ConstantHandlerVisitor : public HGraphVisitor {
+class PCRelativeHandlerVisitor : public HGraphVisitor {
public:
- explicit ConstantHandlerVisitor(HGraph* graph) : HGraphVisitor(graph), base_(nullptr) {}
+ explicit PCRelativeHandlerVisitor(HGraph* graph) : HGraphVisitor(graph), base_(nullptr) {}
private:
void VisitAdd(HAdd* add) OVERRIDE {
@@ -72,7 +72,7 @@ class ConstantHandlerVisitor : public HGraphVisitor {
void VisitPackedSwitch(HPackedSwitch* switch_insn) OVERRIDE {
// We need to replace the HPackedSwitch with a HX86PackedSwitch in order to
// address the constant area.
- InitializeConstantAreaPointer(switch_insn);
+ InitializePCRelativeBasePointer(switch_insn);
HGraph* graph = GetGraph();
HBasicBlock* block = switch_insn->GetBlock();
HX86PackedSwitch* x86_switch = new (graph->GetArena()) HX86PackedSwitch(
@@ -84,7 +84,7 @@ class ConstantHandlerVisitor : public HGraphVisitor {
block->ReplaceAndRemoveInstructionWith(switch_insn, x86_switch);
}
- void InitializeConstantAreaPointer(HInstruction* user) {
+ void InitializePCRelativeBasePointer(HInstruction* user) {
// Ensure we only initialize the pointer once.
if (base_ != nullptr) {
return;
@@ -99,16 +99,24 @@ class ConstantHandlerVisitor : public HGraphVisitor {
}
void ReplaceInput(HInstruction* insn, HConstant* value, int input_index, bool materialize) {
- InitializeConstantAreaPointer(insn);
- HGraph* graph = GetGraph();
- HBasicBlock* block = insn->GetBlock();
+ InitializePCRelativeBasePointer(insn);
HX86LoadFromConstantTable* load_constant =
- new (graph->GetArena()) HX86LoadFromConstantTable(base_, value, materialize);
- block->InsertInstructionBefore(load_constant, insn);
+ new (GetGraph()->GetArena()) HX86LoadFromConstantTable(base_, value, materialize);
+ insn->GetBlock()->InsertInstructionBefore(load_constant, insn);
insn->ReplaceInput(load_constant, input_index);
}
void HandleInvoke(HInvoke* invoke) {
+ // If this is an invoke-static/-direct with PC-relative dex cache array
+ // addressing, we need the PC-relative address base.
+ HInvokeStaticOrDirect* invoke_static_or_direct = invoke->AsInvokeStaticOrDirect();
+ if (invoke_static_or_direct != nullptr && invoke_static_or_direct->HasPcRelativeDexCache()) {
+ InitializePCRelativeBasePointer(invoke);
+ // Add the extra parameter base_.
+ uint32_t index = invoke_static_or_direct->GetCurrentMethodInputIndex();
+ DCHECK(!invoke_static_or_direct->HasCurrentMethodInput());
+ invoke_static_or_direct->InsertInputAt(index, base_);
+ }
// Ensure that we can load FP arguments from the constant area.
for (size_t i = 0, e = invoke->InputCount(); i < e; i++) {
HConstant* input = invoke->InputAt(i)->AsConstant();
@@ -123,8 +131,8 @@ class ConstantHandlerVisitor : public HGraphVisitor {
HX86ComputeBaseMethodAddress* base_;
};
-void ConstantAreaFixups::Run() {
- ConstantHandlerVisitor visitor(graph_);
+void PcRelativeFixups::Run() {
+ PCRelativeHandlerVisitor visitor(graph_);
visitor.VisitInsertionOrder();
}
diff --git a/compiler/optimizing/constant_area_fixups_x86.h b/compiler/optimizing/pc_relative_fixups_x86.h
index 4138039cdd..af708acb03 100644
--- a/compiler/optimizing/constant_area_fixups_x86.h
+++ b/compiler/optimizing/pc_relative_fixups_x86.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_COMPILER_OPTIMIZING_CONSTANT_AREA_FIXUPS_X86_H_
-#define ART_COMPILER_OPTIMIZING_CONSTANT_AREA_FIXUPS_X86_H_
+#ifndef ART_COMPILER_OPTIMIZING_PC_RELATIVE_FIXUPS_X86_H_
+#define ART_COMPILER_OPTIMIZING_PC_RELATIVE_FIXUPS_X86_H_
#include "nodes.h"
#include "optimization.h"
@@ -23,10 +23,10 @@
namespace art {
namespace x86 {
-class ConstantAreaFixups : public HOptimization {
+class PcRelativeFixups : public HOptimization {
public:
- ConstantAreaFixups(HGraph* graph, OptimizingCompilerStats* stats)
- : HOptimization(graph, "constant_area_fixups_x86", stats) {}
+ PcRelativeFixups(HGraph* graph, OptimizingCompilerStats* stats)
+ : HOptimization(graph, "pc_relative_fixups_x86", stats) {}
void Run() OVERRIDE;
};
@@ -34,4 +34,4 @@ class ConstantAreaFixups : public HOptimization {
} // namespace x86
} // namespace art
-#endif // ART_COMPILER_OPTIMIZING_CONSTANT_AREA_FIXUPS_X86_H_
+#endif // ART_COMPILER_OPTIMIZING_PC_RELATIVE_FIXUPS_X86_H_
diff --git a/compiler/optimizing/side_effects_test.cc b/compiler/optimizing/side_effects_test.cc
index ec45d6b2ca..9bbc354290 100644
--- a/compiler/optimizing/side_effects_test.cc
+++ b/compiler/optimizing/side_effects_test.cc
@@ -129,13 +129,13 @@ TEST(SideEffectsTest, NoDependences) {
TEST(SideEffectsTest, VolatileDependences) {
SideEffects volatile_write =
- SideEffects::FieldWriteOfType(Primitive::kPrimInt, true);
+ SideEffects::FieldWriteOfType(Primitive::kPrimInt, /* is_volatile */ true);
SideEffects any_write =
- SideEffects::FieldWriteOfType(Primitive::kPrimInt, false);
+ SideEffects::FieldWriteOfType(Primitive::kPrimInt, /* is_volatile */ false);
SideEffects volatile_read =
- SideEffects::FieldReadOfType(Primitive::kPrimByte, true);
+ SideEffects::FieldReadOfType(Primitive::kPrimByte, /* is_volatile */ true);
SideEffects any_read =
- SideEffects::FieldReadOfType(Primitive::kPrimByte, false);
+ SideEffects::FieldReadOfType(Primitive::kPrimByte, /* is_volatile */ false);
EXPECT_FALSE(volatile_write.MayDependOn(any_read));
EXPECT_TRUE(any_read.MayDependOn(volatile_write));
@@ -151,15 +151,15 @@ TEST(SideEffectsTest, VolatileDependences) {
TEST(SideEffectsTest, SameWidthTypes) {
// Type I/F.
testWriteAndReadDependence(
- SideEffects::FieldWriteOfType(Primitive::kPrimInt, false),
- SideEffects::FieldReadOfType(Primitive::kPrimFloat, false));
+ SideEffects::FieldWriteOfType(Primitive::kPrimInt, /* is_volatile */ false),
+ SideEffects::FieldReadOfType(Primitive::kPrimFloat, /* is_volatile */ false));
testWriteAndReadDependence(
SideEffects::ArrayWriteOfType(Primitive::kPrimInt),
SideEffects::ArrayReadOfType(Primitive::kPrimFloat));
// Type L/D.
testWriteAndReadDependence(
- SideEffects::FieldWriteOfType(Primitive::kPrimLong, false),
- SideEffects::FieldReadOfType(Primitive::kPrimDouble, false));
+ SideEffects::FieldWriteOfType(Primitive::kPrimLong, /* is_volatile */ false),
+ SideEffects::FieldReadOfType(Primitive::kPrimDouble, /* is_volatile */ false));
testWriteAndReadDependence(
SideEffects::ArrayWriteOfType(Primitive::kPrimLong),
SideEffects::ArrayReadOfType(Primitive::kPrimDouble));
@@ -171,9 +171,9 @@ TEST(SideEffectsTest, AllWritesAndReads) {
for (Primitive::Type type = Primitive::kPrimNot;
type < Primitive::kPrimVoid;
type = Primitive::Type(type + 1)) {
- s = s.Union(SideEffects::FieldWriteOfType(type, false));
+ s = s.Union(SideEffects::FieldWriteOfType(type, /* is_volatile */ false));
s = s.Union(SideEffects::ArrayWriteOfType(type));
- s = s.Union(SideEffects::FieldReadOfType(type, false));
+ s = s.Union(SideEffects::FieldReadOfType(type, /* is_volatile */ false));
s = s.Union(SideEffects::ArrayReadOfType(type));
}
EXPECT_TRUE(s.DoesAllReadWrite());
@@ -225,10 +225,10 @@ TEST(SideEffectsTest, BitStrings) {
"||DJ|||||", // note: DJ alias
SideEffects::ArrayReadOfType(Primitive::kPrimDouble).ToString().c_str());
SideEffects s = SideEffects::None();
- s = s.Union(SideEffects::FieldWriteOfType(Primitive::kPrimChar, false));
- s = s.Union(SideEffects::FieldWriteOfType(Primitive::kPrimLong, false));
+ s = s.Union(SideEffects::FieldWriteOfType(Primitive::kPrimChar, /* is_volatile */ false));
+ s = s.Union(SideEffects::FieldWriteOfType(Primitive::kPrimLong, /* is_volatile */ false));
s = s.Union(SideEffects::ArrayWriteOfType(Primitive::kPrimShort));
- s = s.Union(SideEffects::FieldReadOfType(Primitive::kPrimInt, false));
+ s = s.Union(SideEffects::FieldReadOfType(Primitive::kPrimInt, /* is_volatile */ false));
s = s.Union(SideEffects::ArrayReadOfType(Primitive::kPrimFloat));
s = s.Union(SideEffects::ArrayReadOfType(Primitive::kPrimDouble));
EXPECT_STREQ(
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 92ed58cb86..68cf6d9233 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1357,7 +1357,7 @@ class Dex2Oat FINAL {
int32_t image_patch_delta = 0;
if (app_image_ && image_base_ == 0) {
- gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetImageSpace();
+ gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetBootImageSpace();
image_base_ = RoundUp(
reinterpret_cast<uintptr_t>(image_space->GetImageHeader().GetOatFileEnd()),
kPageSize);
@@ -1370,7 +1370,7 @@ class Dex2Oat FINAL {
if (!IsBootImage()) {
TimingLogger::ScopedTiming t3("Loading image checksum", timings_);
- gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetImageSpace();
+ gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetBootImageSpace();
image_file_location_oat_checksum = image_space->GetImageHeader().GetOatChecksum();
image_file_location_oat_data_begin =
reinterpret_cast<uintptr_t>(image_space->GetImageHeader().GetOatDataBegin());
diff --git a/dexdump/dexdump_main.cc b/dexdump/dexdump_main.cc
index 2466f33d1e..dd1002c4c7 100644
--- a/dexdump/dexdump_main.cc
+++ b/dexdump/dexdump_main.cc
@@ -40,11 +40,12 @@ static const char* gProgName = "dexdump";
*/
static void usage(void) {
fprintf(stderr, "Copyright (C) 2007 The Android Open Source Project\n\n");
- fprintf(stderr, "%s: [-c] [-d] [-f] [-h] [-i] [-l layout] [-o outfile]"
+ fprintf(stderr, "%s: [-c] [-d] [-e] [-f] [-h] [-i] [-l layout] [-o outfile]"
" [-t tempfile] dexfile...\n", gProgName);
fprintf(stderr, "\n");
fprintf(stderr, " -c : verify checksum and exit\n");
fprintf(stderr, " -d : disassemble code sections\n");
+ fprintf(stderr, " -e : display exported items only\n");
fprintf(stderr, " -f : display summary information from file header\n");
fprintf(stderr, " -g : dump CFG for dex\n");
fprintf(stderr, " -h : display file header details\n");
@@ -69,7 +70,7 @@ int dexdumpDriver(int argc, char** argv) {
// Parse all arguments.
while (1) {
- const int ic = getopt(argc, argv, "cdfghil:t:o:");
+ const int ic = getopt(argc, argv, "cdefghil:t:o:");
if (ic < 0) {
break; // done
}
@@ -80,6 +81,9 @@ int dexdumpDriver(int argc, char** argv) {
case 'd': // disassemble Dalvik instructions
gOptions.disassemble = true;
break;
+ case 'e': // exported items only
+ gOptions.exportsOnly = true;
+ break;
case 'f': // dump outer file header
gOptions.showFileHeaders = true;
break;
@@ -98,7 +102,6 @@ int dexdumpDriver(int argc, char** argv) {
} else if (strcmp(optarg, "xml") == 0) {
gOptions.outputFormat = OUTPUT_XML;
gOptions.verbose = false;
- gOptions.exportsOnly = true;
} else {
wantUsage = true;
}
diff --git a/imgdiag/imgdiag.cc b/imgdiag/imgdiag.cc
index 304d4e5860..5e710530b9 100644
--- a/imgdiag/imgdiag.cc
+++ b/imgdiag/imgdiag.cc
@@ -814,7 +814,7 @@ class ImgDiagDumper {
static const ImageHeader& GetBootImageHeader() {
gc::Heap* heap = Runtime::Current()->GetHeap();
- gc::space::ImageSpace* image_space = heap->GetImageSpace();
+ gc::space::ImageSpace* image_space = heap->GetBootImageSpace();
CHECK(image_space != nullptr);
const ImageHeader& image_header = image_space->GetImageHeader();
return image_header;
@@ -838,7 +838,7 @@ static int DumpImage(Runtime* runtime, const char* image_location,
std::ostream* os, pid_t image_diff_pid) {
ScopedObjectAccess soa(Thread::Current());
gc::Heap* heap = runtime->GetHeap();
- gc::space::ImageSpace* image_space = heap->GetImageSpace();
+ gc::space::ImageSpace* image_space = heap->GetBootImageSpace();
CHECK(image_space != nullptr);
const ImageHeader& image_header = image_space->GetImageHeader();
if (!image_header.IsValid()) {
diff --git a/imgdiag/imgdiag_test.cc b/imgdiag/imgdiag_test.cc
index 82bc8b9521..0d6a8c9dee 100644
--- a/imgdiag/imgdiag_test.cc
+++ b/imgdiag/imgdiag_test.cc
@@ -42,7 +42,7 @@ class ImgDiagTest : public CommonRuntimeTest {
CommonRuntimeTest::SetUp();
// We loaded the runtime with an explicit image. Therefore the image space must exist.
- gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetImageSpace();
+ gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetBootImageSpace();
ASSERT_TRUE(image_space != nullptr);
boot_image_location_ = image_space->GetImageLocation();
}
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index a163380b25..cd83de6265 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -1618,9 +1618,9 @@ class ImageDumper {
dex_caches_.clear();
{
ReaderMutexLock mu(self, *class_linker->DexLock());
- for (jobject weak_root : class_linker->GetDexCaches()) {
+ for (const ClassLinker::DexCacheData& data : class_linker->GetDexCachesData()) {
mirror::DexCache* dex_cache =
- down_cast<mirror::DexCache*>(self->DecodeJObject(weak_root));
+ down_cast<mirror::DexCache*>(self->DecodeJObject(data.weak_root));
if (dex_cache != nullptr) {
dex_caches_.insert(dex_cache);
}
@@ -2374,7 +2374,7 @@ static int DumpImage(Runtime* runtime, const char* image_location, OatDumperOpti
ScopedObjectAccess soa(Thread::Current());
gc::Heap* heap = runtime->GetHeap();
- gc::space::ImageSpace* image_space = heap->GetImageSpace();
+ gc::space::ImageSpace* image_space = heap->GetBootImageSpace();
CHECK(image_space != nullptr);
const ImageHeader& image_header = image_space->GetImageHeader();
if (!image_header.IsValid()) {
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 64b4722904..c587f68234 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -189,7 +189,7 @@ bool PatchOat::Patch(const std::string& image_location, off_t delta,
LOG(ERROR) << "unable to map image file " << input_image->GetPath() << " : " << error_msg;
return false;
}
- gc::space::ImageSpace* ispc = Runtime::Current()->GetHeap()->GetImageSpace();
+ gc::space::ImageSpace* ispc = Runtime::Current()->GetHeap()->GetBootImageSpace();
PatchOat p(isa, image.release(), ispc->GetLiveBitmap(), ispc->GetMemMap(), delta, timings);
t.NewTiming("Patching files");
@@ -288,7 +288,7 @@ bool PatchOat::Patch(File* input_oat, const std::string& image_location, off_t d
LOG(ERROR) << "unable to map image file " << input_image->GetPath() << " : " << error_msg;
return false;
}
- gc::space::ImageSpace* ispc = Runtime::Current()->GetHeap()->GetImageSpace();
+ gc::space::ImageSpace* ispc = Runtime::Current()->GetHeap()->GetBootImageSpace();
std::unique_ptr<ElfFile> elf(ElfFile::Open(input_oat,
PROT_READ | PROT_WRITE, MAP_PRIVATE, &error_msg));
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index 76c7c4f6f0..be33b0e235 100644
--- a/runtime/arch/arm/entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -164,9 +164,10 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
// Deoptimization from compiled code.
qpoints->pDeoptimize = art_quick_deoptimize_from_compiled_code;
- // Read barrier
+ // Read barrier.
qpoints->pReadBarrierJni = ReadBarrierJni;
qpoints->pReadBarrierSlow = artReadBarrierSlow;
+ qpoints->pReadBarrierForRootSlow = artReadBarrierForRootSlow;
}
} // namespace art
diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc
index 371cbb2673..63285a4015 100644
--- a/runtime/arch/arm64/entrypoints_init_arm64.cc
+++ b/runtime/arch/arm64/entrypoints_init_arm64.cc
@@ -147,9 +147,10 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
// Deoptimization from compiled code.
qpoints->pDeoptimize = art_quick_deoptimize_from_compiled_code;
- // Read barrier
+ // Read barrier.
qpoints->pReadBarrierJni = ReadBarrierJni;
qpoints->pReadBarrierSlow = artReadBarrierSlow;
+ qpoints->pReadBarrierForRootSlow = artReadBarrierForRootSlow;
};
} // namespace art
diff --git a/runtime/arch/mips/entrypoints_direct_mips.h b/runtime/arch/mips/entrypoints_direct_mips.h
index f9c53152f6..74e7638070 100644
--- a/runtime/arch/mips/entrypoints_direct_mips.h
+++ b/runtime/arch/mips/entrypoints_direct_mips.h
@@ -45,7 +45,8 @@ static constexpr bool IsDirectEntrypoint(QuickEntrypointEnum entrypoint) {
entrypoint == kQuickCmpgFloat ||
entrypoint == kQuickCmplDouble ||
entrypoint == kQuickCmplFloat ||
- entrypoint == kQuickReadBarrierSlow;
+ entrypoint == kQuickReadBarrierSlow ||
+ entrypoint == kQuickReadBarrierForRootSlow;
}
} // namespace art
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index 59421dd8b9..cba427dff8 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -271,10 +271,14 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
qpoints->pA64Store = QuasiAtomic::Write64;
static_assert(IsDirectEntrypoint(kQuickA64Store), "Non-direct C stub marked direct.");
+ // Read barrier.
qpoints->pReadBarrierJni = ReadBarrierJni;
static_assert(!IsDirectEntrypoint(kQuickReadBarrierJni), "Non-direct C stub marked direct.");
qpoints->pReadBarrierSlow = artReadBarrierSlow;
static_assert(IsDirectEntrypoint(kQuickReadBarrierSlow), "Direct C stub not marked direct.");
+ qpoints->pReadBarrierForRootSlow = artReadBarrierForRootSlow;
+ static_assert(IsDirectEntrypoint(kQuickReadBarrierForRootSlow),
+ "Direct C stub not marked direct.");
};
} // namespace art
diff --git a/runtime/arch/mips64/entrypoints_init_mips64.cc b/runtime/arch/mips64/entrypoints_init_mips64.cc
index 417d5fc632..89f54ddc04 100644
--- a/runtime/arch/mips64/entrypoints_init_mips64.cc
+++ b/runtime/arch/mips64/entrypoints_init_mips64.cc
@@ -178,9 +178,10 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
qpoints->pA64Load = QuasiAtomic::Read64;
qpoints->pA64Store = QuasiAtomic::Write64;
- // Read barrier
+ // Read barrier.
qpoints->pReadBarrierJni = ReadBarrierJni;
qpoints->pReadBarrierSlow = artReadBarrierSlow;
+ qpoints->pReadBarrierForRootSlow = artReadBarrierForRootSlow;
};
} // namespace art
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 1d10e5db39..afa48cde34 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -2151,4 +2151,39 @@ TEST_F(StubTest, ReadBarrier) {
#endif
}
+TEST_F(StubTest, ReadBarrierForRoot) {
+#if defined(ART_USE_READ_BARRIER) && (defined(__i386__) || defined(__arm__) || \
+ defined(__aarch64__) || defined(__mips__) || (defined(__x86_64__) && !defined(__APPLE__)))
+ Thread* self = Thread::Current();
+
+ const uintptr_t readBarrierForRootSlow =
+ StubTest::GetEntrypoint(self, kQuickReadBarrierForRootSlow);
+
+ // Create an object
+ ScopedObjectAccess soa(self);
+ // garbage is created during ClassLinker::Init
+
+ StackHandleScope<1> hs(soa.Self());
+
+ Handle<mirror::String> obj(
+ hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")));
+
+ EXPECT_FALSE(self->IsExceptionPending());
+
+ GcRoot<mirror::Class>& root = mirror::String::java_lang_String_;
+ size_t result = Invoke3(reinterpret_cast<size_t>(&root), 0U, 0U, readBarrierForRootSlow, self);
+
+ EXPECT_FALSE(self->IsExceptionPending());
+ EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
+ mirror::Class* klass = reinterpret_cast<mirror::Class*>(result);
+ EXPECT_EQ(klass, obj->GetClass());
+
+ // Tests done.
+#else
+ LOG(INFO) << "Skipping read_barrier_for_root_slow";
+ // Force-print to std::cout so it's also outside the logcat.
+ std::cout << "Skipping read_barrier_for_root_slow" << std::endl;
+#endif
+}
+
} // namespace art
diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc
index 019546f2ce..e20001864d 100644
--- a/runtime/arch/x86/entrypoints_init_x86.cc
+++ b/runtime/arch/x86/entrypoints_init_x86.cc
@@ -29,6 +29,7 @@ extern "C" uint32_t art_quick_is_assignable(const mirror::Class* klass,
// Read barrier entrypoints.
extern "C" mirror::Object* art_quick_read_barrier_slow(mirror::Object*, mirror::Object*, uint32_t);
+extern "C" mirror::Object* art_quick_read_barrier_for_root_slow(GcRoot<mirror::Object>*);
void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
// JNI
@@ -136,9 +137,10 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
// Deoptimize
qpoints->pDeoptimize = art_quick_deoptimize_from_compiled_code;
- // Read barrier
+ // Read barrier.
qpoints->pReadBarrierJni = ReadBarrierJni;
qpoints->pReadBarrierSlow = art_quick_read_barrier_slow;
+ qpoints->pReadBarrierForRootSlow = art_quick_read_barrier_for_root_slow;
};
} // namespace art
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 2f485ae644..6eacac1ede 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1588,14 +1588,22 @@ DEFINE_FUNCTION art_nested_signal_return
END_FUNCTION art_nested_signal_return
DEFINE_FUNCTION art_quick_read_barrier_slow
- PUSH edx // pass arg3 - offset
- PUSH ecx // pass arg2 - obj
- PUSH eax // pass arg1 - ref
- call SYMBOL(artReadBarrierSlow) // artReadBarrierSlow(ref, obj, offset)
- addl LITERAL(12), %esp // pop arguments
+ PUSH edx // pass arg3 - offset
+ PUSH ecx // pass arg2 - obj
+ PUSH eax // pass arg1 - ref
+ call SYMBOL(artReadBarrierSlow) // artReadBarrierSlow(ref, obj, offset)
+ addl LITERAL(12), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-12)
ret
END_FUNCTION art_quick_read_barrier_slow
+DEFINE_FUNCTION art_quick_read_barrier_for_root_slow
+ PUSH eax // pass arg1 - root
+ call SYMBOL(artReadBarrierForRootSlow) // artReadBarrierForRootSlow(root)
+ addl LITERAL(4), %esp // pop argument
+ CFI_ADJUST_CFA_OFFSET(-4)
+ ret
+END_FUNCTION art_quick_read_barrier_for_root_slow
+
// TODO: implement these!
UNIMPLEMENTED art_quick_memcmp16
diff --git a/runtime/arch/x86_64/entrypoints_init_x86_64.cc b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
index eae09ee1f4..2b38c9db35 100644
--- a/runtime/arch/x86_64/entrypoints_init_x86_64.cc
+++ b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
@@ -30,6 +30,7 @@ extern "C" uint32_t art_quick_assignable_from_code(const mirror::Class* klass,
// Read barrier entrypoints.
extern "C" mirror::Object* art_quick_read_barrier_slow(mirror::Object*, mirror::Object*, uint32_t);
+extern "C" mirror::Object* art_quick_read_barrier_for_root_slow(GcRoot<mirror::Object>*);
void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
#if defined(__APPLE__)
@@ -140,9 +141,10 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
// Deoptimize
qpoints->pDeoptimize = art_quick_deoptimize_from_compiled_code;
- // Read barrier
+ // Read barrier.
qpoints->pReadBarrierJni = ReadBarrierJni;
qpoints->pReadBarrierSlow = art_quick_read_barrier_slow;
+ qpoints->pReadBarrierForRootSlow = art_quick_read_barrier_for_root_slow;
#endif // __APPLE__
};
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 5fd8969248..17d277e1fd 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -1722,3 +1722,14 @@ DEFINE_FUNCTION art_quick_read_barrier_slow
RESTORE_FP_CALLEE_SAVE_FRAME
ret
END_FUNCTION art_quick_read_barrier_slow
+
+DEFINE_FUNCTION art_quick_read_barrier_for_root_slow
+ SETUP_FP_CALLEE_SAVE_FRAME
+ subq LITERAL(8), %rsp // Alignment padding.
+ CFI_ADJUST_CFA_OFFSET(8)
+ call SYMBOL(artReadBarrierForRootSlow) // artReadBarrierForRootSlow(root)
+ addq LITERAL(8), %rsp
+ CFI_ADJUST_CFA_OFFSET(-8)
+ RESTORE_FP_CALLEE_SAVE_FRAME
+ ret
+END_FUNCTION art_quick_read_barrier_for_root_slow
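
The new stubs above forward a single GcRoot pointer to artReadBarrierForRootSlow and return whatever object the barrier resolves. A minimal standalone sketch of that shape follows; Object, GcRoot and ReadBarrierForRootSlowSketch are simplified stand-ins, not ART's types, and the real slow path consults the concurrent-copying collector rather than simply loading the root.

#include <cstdio>

struct Object {
  int id;
};

template <typename T>
struct GcRoot {
  T* reference;  // Real ART stores a compressed reference here.
};

// Hypothetical slow path: this sketch just loads the root. A real read barrier
// would ask the collector and may return a forwarded (to-space) copy instead.
extern "C" Object* ReadBarrierForRootSlowSketch(GcRoot<Object>* root) {
  return root->reference;
}

int main() {
  Object obj{42};
  GcRoot<Object> root{&obj};
  std::printf("root resolves to object %d\n", ReadBarrierForRootSlowSketch(&root)->id);
  return 0;
}
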
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index dbb546da29..f7ed81254f 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -67,6 +67,18 @@ mirror::String* ArtMethod::GetNameAsString(Thread* self) {
dex_cache);
}
+void ArtMethod::ThrowInvocationTimeError() {
+ DCHECK(!IsInvokable());
+ // NOTE: IsDefaultConflicting must be first since the actual method might or might not be abstract
+ // due to the way we select it.
+ if (IsDefaultConflicting()) {
+ ThrowIncompatibleClassChangeErrorForMethodConflict(this);
+ } else {
+ DCHECK(IsAbstract());
+ ThrowAbstractMethodError(this);
+ }
+}
+
InvokeType ArtMethod::GetInvokeType() {
// TODO: kSuper?
if (GetDeclaringClass()->IsInterface()) {
@@ -330,6 +342,10 @@ void ArtMethod::UnregisterNative() {
RegisterNative(GetJniDlsymLookupStub(), false);
}
+bool ArtMethod::IsOverridableByDefaultMethod() {
+ return GetDeclaringClass()->IsInterface();
+}
+
bool ArtMethod::EqualParameters(Handle<mirror::ObjectArray<mirror::Class>> params) {
auto* dex_cache = GetDexCache();
auto* dex_file = dex_cache->GetDexFile();
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 201b3e64da..5a2d6c36ed 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -136,6 +136,19 @@ class ArtMethod FINAL {
return (GetAccessFlags() & kAccMiranda) != 0;
}
+ // Returns true if invoking this method will not throw an AbstractMethodError or
+ // IncompatibleClassChangeError.
+ bool IsInvokable() {
+ return !IsAbstract() && !IsDefaultConflicting();
+ }
+
+ // A default conflict method is a special sentinel method that stands for a conflict between
+ // multiple default methods. It cannot be invoked, throwing an IncompatibleClassChangeError if one
+ // attempts to do so.
+ bool IsDefaultConflicting() {
+ return (GetAccessFlags() & kAccDefaultConflict) != 0u;
+ }
+
// This is set by the class linker.
bool IsDefault() {
return (GetAccessFlags() & kAccDefault) != 0;
@@ -170,12 +183,14 @@ class ArtMethod FINAL {
}
// Returns true if this method could be overridden by a default method.
- bool IsOverridableByDefaultMethod() {
- return IsDefault() || IsAbstract();
- }
+ bool IsOverridableByDefaultMethod() SHARED_REQUIRES(Locks::mutator_lock_);
bool CheckIncompatibleClassChange(InvokeType type) SHARED_REQUIRES(Locks::mutator_lock_);
+ // Throws the error that would result from trying to invoke this method (i.e.
+ // IncompatibleClassChangeError or AbstractMethodError). Only call if !IsInvokable().
+ void ThrowInvocationTimeError() SHARED_REQUIRES(Locks::mutator_lock_);
+
uint16_t GetMethodIndex() SHARED_REQUIRES(Locks::mutator_lock_);
// Doesn't do erroneous / unresolved class checks.
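
A small self-contained sketch of how the new predicates compose, using stand-in names: kAccAbstract matches the standard dex access flag, while the value of kAccDefaultConflict here is only a placeholder for the sentinel bit referenced above, and MethodSketch is not ART's ArtMethod.

#include <cstdint>
#include <cstdio>

constexpr uint32_t kAccAbstract = 0x0400;              // Standard dex/class access flag.
constexpr uint32_t kAccDefaultConflict = 0x00800000;   // Placeholder bit, value assumed.

struct MethodSketch {
  uint32_t access_flags;
  bool IsAbstract() const { return (access_flags & kAccAbstract) != 0; }
  bool IsDefaultConflicting() const { return (access_flags & kAccDefaultConflict) != 0; }
  // Invokable unless abstract or a default-conflict sentinel.
  bool IsInvokable() const { return !IsAbstract() && !IsDefaultConflicting(); }
};

int main() {
  MethodSketch concrete{0u};
  MethodSketch conflict{kAccDefaultConflict};
  std::printf("concrete invokable: %d, conflict invokable: %d\n",
              concrete.IsInvokable(), conflict.IsInvokable());
  return 0;
}
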
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 69f6fe96ff..b548dfb639 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -122,7 +122,7 @@ ADD_TEST_EQ(THREAD_SELF_OFFSET,
art::Thread::SelfOffset<__SIZEOF_POINTER__>().Int32Value())
// Offset of field Thread::tlsPtr_.thread_local_pos.
-#define THREAD_LOCAL_POS_OFFSET (THREAD_CARD_TABLE_OFFSET + 150 * __SIZEOF_POINTER__)
+#define THREAD_LOCAL_POS_OFFSET (THREAD_CARD_TABLE_OFFSET + 151 * __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_LOCAL_POS_OFFSET,
art::Thread::ThreadLocalPosOffset<__SIZEOF_POINTER__>().Int32Value())
// Offset of field Thread::tlsPtr_.thread_local_end.
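
The offset moves from 150 to 151 pointer-sized slots because the change adds one entrypoint field ahead of thread_local_pos. A toy static_assert showing that arithmetic, with layouts that are stand-ins rather than ART's Thread:

#include <cstddef>

// Stand-in layouts: 150 vs. 151 pointer-sized slots ahead of the field of interest.
struct TlsSketchBefore { void* slots[150]; void* thread_local_pos; };
struct TlsSketchAfter  { void* slots[151]; void* thread_local_pos; };

static_assert(offsetof(TlsSketchAfter, thread_local_pos) ==
                  offsetof(TlsSketchBefore, thread_local_pos) + sizeof(void*),
              "one extra entrypoint slot shifts the field by exactly one pointer");

int main() { return 0; }
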
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index 71afa0f709..771b2d0509 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -316,22 +316,22 @@ void ArenaAllocator::UpdateBytesAllocated() {
}
void* ArenaAllocator::AllocWithMemoryTool(size_t bytes, ArenaAllocKind kind) {
+ // We mark all memory for a newly retrieved arena as inaccessible and then
+ // mark only the actually allocated memory as defined. That leaves red zones
+ // and padding between allocations marked as inaccessible.
size_t rounded_bytes = RoundUp(bytes + kMemoryToolRedZoneBytes, 8);
if (UNLIKELY(ptr_ + rounded_bytes > end_)) {
// Obtain a new block.
ObtainNewArenaForAllocation(rounded_bytes);
CHECK(ptr_ != nullptr);
- MEMORY_TOOL_MAKE_UNDEFINED(ptr_, end_ - ptr_);
+ MEMORY_TOOL_MAKE_NOACCESS(ptr_, end_ - ptr_);
}
ArenaAllocatorStats::RecordAlloc(rounded_bytes, kind);
uint8_t* ret = ptr_;
ptr_ += rounded_bytes;
- // Check that the memory is already zeroed out.
- for (uint8_t* ptr = ret; ptr < ptr_; ++ptr) {
- CHECK_EQ(*ptr, 0U);
- }
MEMORY_TOOL_MAKE_DEFINED(ret, bytes);
- MEMORY_TOOL_MAKE_NOACCESS(ret + bytes, rounded_bytes - bytes);
+ // Check that the memory is already zeroed out.
+ DCHECK(std::all_of(ret, ret + bytes, [](uint8_t val) { return val == 0u; }));
return ret;
}
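
A compilable toy version of the red-zone scheme the new comment describes; MakeNoAccess/MakeDefined are printf stand-ins for the memory-tool macros and ArenaSketch is not ART's ArenaAllocator. A fresh arena starts fully poisoned, each allocation is rounded up past a red zone, and only the requested bytes are marked defined.

#include <cstddef>
#include <cstdio>

// Stand-ins for the memory-tool annotations (real ART uses Valgrind/ASan macros).
static void MakeNoAccess(const void* p, size_t n) { std::printf("noaccess %zu bytes at %p\n", n, p); }
static void MakeDefined(const void* p, size_t n)  { std::printf("defined  %zu bytes at %p\n", n, p); }

constexpr size_t kRedZoneBytes = 8;

struct ArenaSketch {
  unsigned char buffer[256];
  size_t used = 0;

  void* Alloc(size_t bytes) {
    size_t rounded = (bytes + kRedZoneBytes + 7u) & ~size_t{7};  // Round up, keep a red zone.
    if (used + rounded > sizeof(buffer)) return nullptr;
    void* result = buffer + used;
    used += rounded;
    MakeDefined(result, bytes);  // Only the requested bytes; padding and red zone stay poisoned.
    return result;
  }
};

int main() {
  ArenaSketch arena;
  MakeNoAccess(arena.buffer, sizeof(arena.buffer));  // Fresh arena: everything inaccessible.
  arena.Alloc(20);
  arena.Alloc(5);
  return 0;
}
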
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index ace6c388af..36334c4129 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -288,6 +288,11 @@ class ArenaPool {
DISALLOW_COPY_AND_ASSIGN(ArenaPool);
};
+// Fast single-threaded allocator for zero-initialized memory chunks.
+//
+// Memory is allocated from ArenaPool in large chunks and then rationed through
+// the ArenaAllocator. It's returned to the ArenaPool only when the ArenaAllocator
+// is destroyed.
class ArenaAllocator
: private DebugStackRefCounter, private ArenaAllocatorStats, private ArenaAllocatorMemoryTool {
public:
diff --git a/runtime/base/bit_utils.h b/runtime/base/bit_utils.h
index 9c78ee59dd..d6a44f7293 100644
--- a/runtime/base/bit_utils.h
+++ b/runtime/base/bit_utils.h
@@ -53,6 +53,7 @@ static constexpr int CTZ(T x) {
: __builtin_ctzll(x);
}
+// Return the number of 1-bits in `x`.
template<typename T>
static constexpr int POPCOUNT(T x) {
return (sizeof(T) == sizeof(uint32_t))
diff --git a/runtime/base/scoped_arena_allocator.cc b/runtime/base/scoped_arena_allocator.cc
index 31f96e4783..90c6ee34ec 100644
--- a/runtime/base/scoped_arena_allocator.cc
+++ b/runtime/base/scoped_arena_allocator.cc
@@ -91,16 +91,19 @@ void ArenaStack::UpdateBytesAllocated() {
}
void* ArenaStack::AllocWithMemoryTool(size_t bytes, ArenaAllocKind kind) {
+ // We mark all memory for a newly retrieved arena as inaccessible and then
+ // mark only the actually allocated memory as defined. That leaves red zones
+ // and padding between allocations marked as inaccessible.
size_t rounded_bytes = RoundUp(bytes + kMemoryToolRedZoneBytes, 8);
uint8_t* ptr = top_ptr_;
if (UNLIKELY(static_cast<size_t>(top_end_ - ptr) < rounded_bytes)) {
ptr = AllocateFromNextArena(rounded_bytes);
CHECK(ptr != nullptr) << "Failed to allocate memory";
+ MEMORY_TOOL_MAKE_NOACCESS(ptr, top_end_ - ptr);
}
CurrentStats()->RecordAlloc(bytes, kind);
top_ptr_ = ptr + rounded_bytes;
MEMORY_TOOL_MAKE_UNDEFINED(ptr, bytes);
- MEMORY_TOOL_MAKE_NOACCESS(ptr + bytes, rounded_bytes - bytes);
return ptr;
}
diff --git a/runtime/base/scoped_arena_allocator.h b/runtime/base/scoped_arena_allocator.h
index a30c73d749..a87153bd77 100644
--- a/runtime/base/scoped_arena_allocator.h
+++ b/runtime/base/scoped_arena_allocator.h
@@ -42,6 +42,7 @@ enum class ArenaFreeTag : uint8_t {
static constexpr size_t kArenaAlignment = 8;
// Holds a list of Arenas for use by ScopedArenaAllocator stack.
+// The memory is returned to the ArenaPool when the ArenaStack is destroyed.
class ArenaStack : private DebugStackRefCounter, private ArenaAllocatorMemoryTool {
public:
explicit ArenaStack(ArenaPool* arena_pool);
@@ -121,6 +122,12 @@ class ArenaStack : private DebugStackRefCounter, private ArenaAllocatorMemoryToo
DISALLOW_COPY_AND_ASSIGN(ArenaStack);
};
+// Fast single-threaded allocator. Allocated chunks are _not_ guaranteed to be zero-initialized.
+//
+// Unlike the ArenaAllocator, ScopedArenaAllocator is intended for relatively short-lived
+// objects and allows nesting multiple allocators. Only the top allocator can be used but
+// once it's destroyed, its memory can be reused by the next ScopedArenaAllocator on the
+// stack. This is facilitated by returning the memory to the ArenaStack.
class ScopedArenaAllocator
: private DebugStackReference, private DebugStackRefCounter, private ArenaAllocatorStats {
public:
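
A toy illustration of the nesting and reuse behaviour the new comment describes, assuming a simple bump-pointer arena; StackArenaSketch and ScopedAllocSketch are stand-ins, not ART's ArenaStack/ScopedArenaAllocator. Destroying the inner scope rewinds the arena, so the next scope reuses the same bytes.

#include <cstddef>
#include <cstdio>
#include <vector>

struct StackArenaSketch {
  std::vector<unsigned char> storage = std::vector<unsigned char>(1024);
  size_t top = 0;
};

// Toy scoped allocator: remember the arena top on construction, rewind on
// destruction so a later scope can reuse the same memory.
class ScopedAllocSketch {
 public:
  explicit ScopedAllocSketch(StackArenaSketch* arena) : arena_(arena), mark_(arena->top) {}
  ~ScopedAllocSketch() { arena_->top = mark_; }
  void* Alloc(size_t bytes) {
    void* p = arena_->storage.data() + arena_->top;
    arena_->top += (bytes + 7u) & ~size_t{7};
    return p;
  }
 private:
  StackArenaSketch* arena_;
  size_t mark_;
};

int main() {
  StackArenaSketch arena;
  void* first = nullptr;
  {
    ScopedAllocSketch inner(&arena);
    first = inner.Alloc(64);
  }  // Inner scope destroyed; its bytes are free for reuse.
  ScopedAllocSketch next(&arena);
  std::printf("reused the same address: %d\n", next.Alloc(64) == first);
  return 0;
}
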
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index bafd5edfc6..dde100125a 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -303,7 +303,7 @@ static void ShuffleForward(size_t* current_field_idx,
ClassLinker::ClassLinker(InternTable* intern_table)
// dex_lock_ is recursive as it may be used in stack dumping.
: dex_lock_("ClassLinker dex lock", kDefaultMutexLevel),
- dex_cache_image_class_lookup_required_(false),
+ dex_cache_boot_image_class_lookup_required_(false),
failed_dex_cache_class_lookups_(0),
class_roots_(nullptr),
array_iftable_(nullptr),
@@ -368,10 +368,12 @@ void ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b
mirror::Class::SetStatus(java_lang_Object, mirror::Class::kStatusLoaded, self);
java_lang_Object->SetObjectSize(sizeof(mirror::Object));
- runtime->SetSentinel(heap->AllocObject<true>(self,
- java_lang_Object.Get(),
- java_lang_Object->GetObjectSize(),
- VoidFunctor()));
+ // Allocate in non-movable so that it's possible to check if a JNI weak global ref has been
+ // cleared without triggering the read barrier and unintentionally mark the sentinel alive.
+ runtime->SetSentinel(heap->AllocNonMovableObject<true>(self,
+ java_lang_Object.Get(),
+ java_lang_Object->GetObjectSize(),
+ VoidFunctor()));
// Object[] next to hold class roots.
Handle<mirror::Class> object_array_class(hs.NewHandle(
@@ -794,7 +796,7 @@ static void SanityCheckObjectsCallback(mirror::Object* obj, void* arg ATTRIBUTE_
CHECK_EQ(field.GetDeclaringClass(), klass);
}
auto* runtime = Runtime::Current();
- auto* image_space = runtime->GetHeap()->GetImageSpace();
+ auto* image_space = runtime->GetHeap()->GetBootImageSpace();
auto pointer_size = runtime->GetClassLinker()->GetImagePointerSize();
for (auto& m : klass->GetDirectMethods(pointer_size)) {
SanityCheckArtMethod(&m, klass, image_space);
@@ -855,10 +857,10 @@ void ClassLinker::InitFromImage() {
Runtime* const runtime = Runtime::Current();
Thread* const self = Thread::Current();
gc::Heap* const heap = runtime->GetHeap();
- gc::space::ImageSpace* const space = heap->GetImageSpace();
+ gc::space::ImageSpace* const space = heap->GetBootImageSpace();
CHECK(space != nullptr);
image_pointer_size_ = space->GetImageHeader().GetPointerSize();
- dex_cache_image_class_lookup_required_ = true;
+ dex_cache_boot_image_class_lookup_required_ = true;
const OatFile* oat_file = runtime->GetOatFileManager().RegisterImageOatFile(space);
DCHECK(oat_file != nullptr);
CHECK_EQ(oat_file->GetOatHeader().GetImageFileLocationOatChecksum(), 0U);
@@ -886,10 +888,12 @@ void ClassLinker::InitFromImage() {
mirror::Class* java_lang_Object = GetClassRoot(kJavaLangObject);
java_lang_Object->SetObjectSize(sizeof(mirror::Object));
- Runtime::Current()->SetSentinel(heap->AllocObject<true>(self,
- java_lang_Object,
- java_lang_Object->GetObjectSize(),
- VoidFunctor()));
+ // Allocate in non-movable so that it's possible to check if a JNI weak global ref has been
+ // cleared without triggering the read barrier and unintentionally mark the sentinel alive.
+ runtime->SetSentinel(heap->AllocNonMovableObject<true>(self,
+ java_lang_Object,
+ java_lang_Object->GetObjectSize(),
+ VoidFunctor()));
CHECK_EQ(oat_file->GetOatHeader().GetDexFileCount(),
static_cast<uint32_t>(dex_caches->GetLength()));
@@ -1086,8 +1090,8 @@ void ClassLinker::VisitClassesInternal(ClassVisitor* visitor) {
}
void ClassLinker::VisitClasses(ClassVisitor* visitor) {
- if (dex_cache_image_class_lookup_required_) {
- MoveImageClassesToClassTable();
+ if (dex_cache_boot_image_class_lookup_required_) {
+ AddBootImageClassesToClassTable();
}
Thread* const self = Thread::Current();
ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
@@ -1858,7 +1862,7 @@ const OatFile::OatMethod ClassLinker::FindOatMethodFor(ArtMethod* method, bool*
// Special case to get oat code without overwriting a trampoline.
const void* ClassLinker::GetQuickOatCodeFor(ArtMethod* method) {
- CHECK(!method->IsAbstract()) << PrettyMethod(method);
+ CHECK(method->IsInvokable()) << PrettyMethod(method);
if (method->IsProxyMethod()) {
return GetQuickProxyInvokeHandler();
}
@@ -1878,7 +1882,7 @@ const void* ClassLinker::GetQuickOatCodeFor(ArtMethod* method) {
}
const void* ClassLinker::GetOatMethodQuickCodeFor(ArtMethod* method) {
- if (method->IsNative() || method->IsAbstract() || method->IsProxyMethod()) {
+ if (method->IsNative() || !method->IsInvokable() || method->IsProxyMethod()) {
return nullptr;
}
bool found;
@@ -1973,6 +1977,13 @@ void ClassLinker::FixupStaticTrampolines(mirror::Class* klass) {
// Ignore virtual methods on the iterator.
}
+void ClassLinker::EnsureThrowsInvocationError(ArtMethod* method) {
+ DCHECK(method != nullptr);
+ DCHECK(!method->IsInvokable());
+ method->SetEntryPointFromQuickCompiledCodePtrSize(quick_to_interpreter_bridge_trampoline_,
+ image_pointer_size_);
+}
+
void ClassLinker::LinkCode(ArtMethod* method, const OatFile::OatClass* oat_class,
uint32_t class_def_method_index) {
Runtime* const runtime = Runtime::Current();
@@ -1992,8 +2003,8 @@ void ClassLinker::LinkCode(ArtMethod* method, const OatFile::OatClass* oat_class
// Install entry point from interpreter.
bool enter_interpreter = NeedsInterpreter(method, method->GetEntryPointFromQuickCompiledCode());
- if (method->IsAbstract()) {
- method->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
+ if (!method->IsInvokable()) {
+ EnsureThrowsInvocationError(method);
return;
}
@@ -2317,17 +2328,22 @@ void ClassLinker::RegisterDexFileLocked(const DexFile& dex_file,
// Clean up pass to remove null dex caches.
// Null dex caches can occur due to class unloading and we are lazily removing null entries.
JavaVMExt* const vm = self->GetJniEnv()->vm;
- for (auto it = dex_caches_.begin(); it != dex_caches_.end();) {
- mirror::Object* dex_cache_root = self->DecodeJObject(*it);
- if (dex_cache_root == nullptr) {
- vm->DeleteWeakGlobalRef(self, *it);
+ for (auto it = dex_caches_.begin(); it != dex_caches_.end(); ) {
+ DexCacheData data = *it;
+ if (self->IsJWeakCleared(data.weak_root)) {
+ vm->DeleteWeakGlobalRef(self, data.weak_root);
it = dex_caches_.erase(it);
} else {
++it;
}
}
- dex_caches_.push_back(vm->AddWeakGlobalRef(self, dex_cache.Get()));
+ jweak dex_cache_jweak = vm->AddWeakGlobalRef(self, dex_cache.Get());
dex_cache->SetDexFile(&dex_file);
+ DexCacheData data;
+ data.weak_root = dex_cache_jweak;
+ data.dex_file = dex_cache->GetDexFile();
+ data.resolved_types = dex_cache->GetResolvedTypes();
+ dex_caches_.push_back(data);
}
mirror::DexCache* ClassLinker::RegisterDexFile(const DexFile& dex_file, LinearAlloc* linear_alloc) {
@@ -2374,10 +2390,16 @@ mirror::DexCache* ClassLinker::FindDexCacheLocked(Thread* self,
const DexFile& dex_file,
bool allow_failure) {
// Search assuming unique-ness of dex file.
- for (jweak weak_root : dex_caches_) {
- mirror::DexCache* dex_cache = down_cast<mirror::DexCache*>(self->DecodeJObject(weak_root));
- if (dex_cache != nullptr && dex_cache->GetDexFile() == &dex_file) {
- return dex_cache;
+ for (const DexCacheData& data : dex_caches_) {
+ // Avoid decoding (and read barriers) other unrelated dex caches.
+ if (data.dex_file == &dex_file) {
+ mirror::DexCache* dex_cache =
+ down_cast<mirror::DexCache*>(self->DecodeJObject(data.weak_root));
+ if (dex_cache != nullptr) {
+ return dex_cache;
+ } else {
+ break;
+ }
}
}
if (allow_failure) {
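
A self-contained sketch of the lookup pattern introduced above, with stand-in types rather than ART's ClassLinker or jweak handling: caching the raw DexFile* key beside the weak root lets the loop skip decoding, and therefore read-barrier-ing, unrelated dex caches.

#include <cstdio>
#include <vector>

struct DexFileSketch { const char* location; };
struct DexCacheSketch { const DexFileSketch* dex_file; };

// Stand-in for ClassLinker::DexCacheData: the raw key is cached next to the weak root.
struct DexCacheDataSketch {
  DexCacheSketch* weak_root;      // Stand-in for the jweak; nullptr models "cleared".
  const DexFileSketch* dex_file;  // Cached key, comparable without decoding the root.
};

DexCacheSketch* FindDexCacheSketch(const std::vector<DexCacheDataSketch>& caches,
                                   const DexFileSketch* wanted) {
  for (const DexCacheDataSketch& data : caches) {
    if (data.dex_file == wanted) {
      // Only the matching entry is "decoded"; in ART this is where the read barrier runs.
      return data.weak_root;
    }
  }
  return nullptr;
}

int main() {
  DexFileSketch core{"core.dex"}, app{"app.dex"};
  DexCacheSketch core_cache{&core}, app_cache{&app};
  std::vector<DexCacheDataSketch> caches = {{&core_cache, &core}, {&app_cache, &app}};
  std::printf("found cache for: %s\n", FindDexCacheSketch(caches, &app)->dex_file->location);
  return 0;
}
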
@@ -2385,8 +2407,8 @@ mirror::DexCache* ClassLinker::FindDexCacheLocked(Thread* self,
}
std::string location(dex_file.GetLocation());
// Failure, dump diagnostic and abort.
- for (jobject weak_root : dex_caches_) {
- mirror::DexCache* dex_cache = down_cast<mirror::DexCache*>(self->DecodeJObject(weak_root));
+ for (const DexCacheData& data : dex_caches_) {
+ mirror::DexCache* dex_cache = down_cast<mirror::DexCache*>(self->DecodeJObject(data.weak_root));
if (dex_cache != nullptr) {
LOG(ERROR) << "Registered dex file " << dex_cache->GetDexFile()->GetLocation();
}
@@ -2398,10 +2420,13 @@ mirror::DexCache* ClassLinker::FindDexCacheLocked(Thread* self,
void ClassLinker::FixupDexCaches(ArtMethod* resolution_method) {
Thread* const self = Thread::Current();
ReaderMutexLock mu(self, dex_lock_);
- for (jobject weak_root : dex_caches_) {
- mirror::DexCache* dex_cache = down_cast<mirror::DexCache*>(self->DecodeJObject(weak_root));
- if (dex_cache != nullptr) {
- dex_cache->Fixup(resolution_method, image_pointer_size_);
+ for (const DexCacheData& data : dex_caches_) {
+ if (!self->IsJWeakCleared(data.weak_root)) {
+ mirror::DexCache* dex_cache = down_cast<mirror::DexCache*>(
+ self->DecodeJObject(data.weak_root));
+ if (dex_cache != nullptr) {
+ dex_cache->Fixup(resolution_method, image_pointer_size_);
+ }
}
}
}
@@ -2636,11 +2661,13 @@ mirror::Class* ClassLinker::InsertClass(const char* descriptor, mirror::Class* k
if (existing != nullptr) {
return existing;
}
- if (kIsDebugBuild && !klass->IsTemp() && class_loader == nullptr &&
- dex_cache_image_class_lookup_required_) {
+ if (kIsDebugBuild &&
+ !klass->IsTemp() &&
+ class_loader == nullptr &&
+ dex_cache_boot_image_class_lookup_required_) {
// Check a class loaded with the system class loader matches one in the image if the class
// is in the image.
- existing = LookupClassFromImage(descriptor);
+ existing = LookupClassFromBootImage(descriptor);
if (existing != nullptr) {
CHECK_EQ(klass, existing);
}
@@ -2684,11 +2711,11 @@ mirror::Class* ClassLinker::LookupClass(Thread* self,
}
}
}
- if (class_loader != nullptr || !dex_cache_image_class_lookup_required_) {
+ if (class_loader != nullptr || !dex_cache_boot_image_class_lookup_required_) {
return nullptr;
}
// Lookup failed but need to search dex_caches_.
- mirror::Class* result = LookupClassFromImage(descriptor);
+ mirror::Class* result = LookupClassFromBootImage(descriptor);
if (result != nullptr) {
result = InsertClass(descriptor, result, hash);
} else {
@@ -2697,37 +2724,43 @@ mirror::Class* ClassLinker::LookupClass(Thread* self,
// classes into the class table.
constexpr uint32_t kMaxFailedDexCacheLookups = 1000;
if (++failed_dex_cache_class_lookups_ > kMaxFailedDexCacheLookups) {
- MoveImageClassesToClassTable();
+ AddBootImageClassesToClassTable();
}
}
return result;
}
-static mirror::ObjectArray<mirror::DexCache>* GetImageDexCaches()
+static mirror::ObjectArray<mirror::DexCache>* GetImageDexCaches(gc::space::ImageSpace* image_space)
SHARED_REQUIRES(Locks::mutator_lock_) {
- gc::space::ImageSpace* image = Runtime::Current()->GetHeap()->GetImageSpace();
- CHECK(image != nullptr);
- mirror::Object* root = image->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
+ CHECK(image_space != nullptr);
+ mirror::Object* root = image_space->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
+ DCHECK(root != nullptr);
return root->AsObjectArray<mirror::DexCache>();
}
-void ClassLinker::MoveImageClassesToClassTable() {
+void ClassLinker::AddBootImageClassesToClassTable() {
+ if (dex_cache_boot_image_class_lookup_required_) {
+ AddImageClassesToClassTable(Runtime::Current()->GetHeap()->GetBootImageSpace(),
+ /*class_loader*/nullptr);
+ dex_cache_boot_image_class_lookup_required_ = false;
+ }
+}
+
+void ClassLinker::AddImageClassesToClassTable(gc::space::ImageSpace* image_space,
+ mirror::ClassLoader* class_loader) {
Thread* self = Thread::Current();
WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
- if (!dex_cache_image_class_lookup_required_) {
- return; // All dex cache classes are already in the class table.
- }
ScopedAssertNoThreadSuspension ants(self, "Moving image classes to class table");
- mirror::ObjectArray<mirror::DexCache>* dex_caches = GetImageDexCaches();
+ mirror::ObjectArray<mirror::DexCache>* dex_caches = GetImageDexCaches(image_space);
std::string temp;
- ClassTable* const class_table = InsertClassTableForClassLoader(nullptr);
+ ClassTable* const class_table = InsertClassTableForClassLoader(class_loader);
for (int32_t i = 0; i < dex_caches->GetLength(); i++) {
mirror::DexCache* dex_cache = dex_caches->Get(i);
GcRoot<mirror::Class>* types = dex_cache->GetResolvedTypes();
for (int32_t j = 0, num_types = dex_cache->NumResolvedTypes(); j < num_types; j++) {
mirror::Class* klass = types[j].Read();
if (klass != nullptr) {
- DCHECK(klass->GetClassLoader() == nullptr);
+ DCHECK_EQ(klass->GetClassLoader(), class_loader);
const char* descriptor = klass->GetDescriptor(&temp);
size_t hash = ComputeModifiedUtf8Hash(descriptor);
mirror::Class* existing = class_table->Lookup(descriptor, hash);
@@ -2743,7 +2776,6 @@ void ClassLinker::MoveImageClassesToClassTable() {
}
}
}
- dex_cache_image_class_lookup_required_ = false;
}
class MoveClassTableToPreZygoteVisitor : public ClassLoaderVisitor {
@@ -2767,9 +2799,10 @@ void ClassLinker::MoveClassTableToPreZygote() {
VisitClassLoaders(&visitor);
}
-mirror::Class* ClassLinker::LookupClassFromImage(const char* descriptor) {
+mirror::Class* ClassLinker::LookupClassFromBootImage(const char* descriptor) {
ScopedAssertNoThreadSuspension ants(Thread::Current(), "Image class lookup");
- mirror::ObjectArray<mirror::DexCache>* dex_caches = GetImageDexCaches();
+ mirror::ObjectArray<mirror::DexCache>* dex_caches = GetImageDexCaches(
+ Runtime::Current()->GetHeap()->GetBootImageSpace());
for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
mirror::DexCache* dex_cache = dex_caches->Get(i);
const DexFile* dex_file = dex_cache->GetDexFile();
@@ -2811,8 +2844,8 @@ class LookupClassesVisitor : public ClassLoaderVisitor {
void ClassLinker::LookupClasses(const char* descriptor, std::vector<mirror::Class*>& result) {
result.clear();
- if (dex_cache_image_class_lookup_required_) {
- MoveImageClassesToClassTable();
+ if (dex_cache_boot_image_class_lookup_required_) {
+ AddBootImageClassesToClassTable();
}
Thread* const self = Thread::Current();
ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
@@ -3331,15 +3364,18 @@ ArtMethod* ClassLinker::FindMethodForProxy(mirror::Class* proxy_class, ArtMethod
Thread* const self = Thread::Current();
ReaderMutexLock mu(self, dex_lock_);
// Locate the dex cache of the original interface/Object
- for (jobject weak_root : dex_caches_) {
- mirror::DexCache* dex_cache = down_cast<mirror::DexCache*>(self->DecodeJObject(weak_root));
- if (dex_cache != nullptr &&
- proxy_method->HasSameDexCacheResolvedTypes(dex_cache->GetResolvedTypes(),
+ for (const DexCacheData& data : dex_caches_) {
+ if (!self->IsJWeakCleared(data.weak_root) &&
+ proxy_method->HasSameDexCacheResolvedTypes(data.resolved_types,
image_pointer_size_)) {
- ArtMethod* resolved_method = dex_cache->GetResolvedMethod(
- proxy_method->GetDexMethodIndex(), image_pointer_size_);
- CHECK(resolved_method != nullptr);
- return resolved_method;
+ mirror::DexCache* dex_cache = down_cast<mirror::DexCache*>(
+ self->DecodeJObject(data.weak_root));
+ if (dex_cache != nullptr) {
+ ArtMethod* resolved_method = dex_cache->GetResolvedMethod(
+ proxy_method->GetDexMethodIndex(), image_pointer_size_);
+ CHECK(resolved_method != nullptr);
+ return resolved_method;
+ }
}
}
}
@@ -3404,7 +3440,7 @@ void ClassLinker::CheckProxyMethod(ArtMethod* method, ArtMethod* prototype) cons
// Basic sanity
CHECK(!prototype->IsFinal());
CHECK(method->IsFinal());
- CHECK(!method->IsAbstract());
+ CHECK(method->IsInvokable());
// The proxy method doesn't have its own dex cache or dex file and so it steals those of its
// interface prototype. The exception to this are Constructors and the Class of the Proxy itself.
@@ -4135,10 +4171,10 @@ bool ClassLinker::LinkClass(Thread* self,
Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
}
CHECK_EQ(existing, klass.Get());
- if (kIsDebugBuild && class_loader == nullptr && dex_cache_image_class_lookup_required_) {
+ if (kIsDebugBuild && class_loader == nullptr && dex_cache_boot_image_class_lookup_required_) {
// Check a class loaded with the system class loader matches one in the image if the class
// is in the image.
- mirror::Class* const image_class = LookupClassFromImage(descriptor);
+ mirror::Class* const image_class = LookupClassFromBootImage(descriptor);
if (image_class != nullptr) {
CHECK_EQ(klass.Get(), existing) << descriptor;
}
@@ -4480,7 +4516,7 @@ bool ClassLinker::LinkMethods(Thread* self,
// A map from vtable indexes to the method they need to be updated to point to. Used because we
// need to have default methods be in the virtuals array of each class but we don't set that up
// until LinkInterfaceMethods.
- std::unordered_map<size_t, ArtMethod*> default_translations;
+ std::unordered_map<size_t, ClassLinker::MethodTranslation> default_translations;
// Link virtual methods then interface methods.
// We set up the interface lookup table first because we need it to determine if we need to update
// any vtable entries with new default method implementations.
@@ -4613,7 +4649,7 @@ const uint32_t LinkVirtualHashTable::removed_index_ = std::numeric_limits<uint32
bool ClassLinker::LinkVirtualMethods(
Thread* self,
Handle<mirror::Class> klass,
- /*out*/std::unordered_map<size_t, ArtMethod*>* default_translations) {
+ /*out*/std::unordered_map<size_t, ClassLinker::MethodTranslation>* default_translations) {
const size_t num_virtual_methods = klass->NumVirtualMethods();
if (klass->IsInterface()) {
// No vtable.
@@ -4736,46 +4772,55 @@ bool ClassLinker::LinkVirtualMethods(
<< " would have incorrectly overridden the package-private method in "
<< PrettyDescriptor(super_method->GetDeclaringClassDescriptor());
}
- } else if (super_method->IsDefault()) {
+ } else if (super_method->IsOverridableByDefaultMethod()) {
// We didn't directly override this method but we might through default methods...
// Check for default method update.
ArtMethod* default_method = nullptr;
- std::string icce_message;
- if (!FindDefaultMethodImplementation(self,
- super_method,
- klass,
- /*out*/&default_method,
- /*out*/&icce_message)) {
- // An error occurred while finding default methods.
- // TODO This should actually be thrown when we attempt to invoke this method.
- ThrowIncompatibleClassChangeError(klass.Get(), "%s", icce_message.c_str());
- return false;
- }
- // This should always work because we inherit superclass interfaces. We should either get
- // 1) An IncompatibleClassChangeError because of conflicting default method
- // implementations.
- // 2) The same default method implementation as the superclass.
- // 3) A default method that overrides the superclass's.
- // Therefore this check should never fail.
- CHECK(default_method != nullptr);
- if (UNLIKELY(default_method->GetDeclaringClass() != super_method->GetDeclaringClass())) {
- // TODO Refactor this add default methods to virtuals here and not in
- // LinkInterfaceMethods maybe.
- // The problem is default methods might override previously present default-method or
- // miranda-method vtable entries from the superclass. Unfortunately we need these to
- // be entries in this class's virtuals. We do not give these entries there until
- // LinkInterfaceMethods so we pass this map around to let it know which vtable
- // entries need to be updated.
- // Make a note that vtable entry j must be updated, store what it needs to be updated to.
- // We will allocate a virtual method slot in LinkInterfaceMethods and fix it up then.
- default_translations->insert({j, default_method});
- VLOG(class_linker) << "Method " << PrettyMethod(super_method) << " overridden by default "
- << PrettyMethod(default_method) << " in " << PrettyClass(klass.Get());
- } else {
- // They are the same method/no override
- // Cannot do direct comparison because we had to copy the ArtMethod object into the
- // superclass's vtable.
- continue;
+ switch (FindDefaultMethodImplementation(self,
+ super_method,
+ klass,
+ /*out*/&default_method)) {
+ case DefaultMethodSearchResult::kDefaultConflict: {
+ // A conflict was found looking for default methods. Note this (assuming it wasn't
+ // pre-existing) in the translations map.
+ if (UNLIKELY(!super_method->IsDefaultConflicting())) {
+ // Don't generate another conflict method to reduce memory use as an optimization.
+ default_translations->insert(
+ {j, ClassLinker::MethodTranslation::CreateConflictingMethod()});
+ }
+ break;
+ }
+ case DefaultMethodSearchResult::kAbstractFound: {
+ // No conflict but method is abstract.
+ // We note that this vtable entry must be made abstract.
+ if (UNLIKELY(!super_method->IsAbstract())) {
+ default_translations->insert(
+ {j, ClassLinker::MethodTranslation::CreateAbstractMethod()});
+ }
+ break;
+ }
+ case DefaultMethodSearchResult::kDefaultFound: {
+ if (UNLIKELY(super_method->IsDefaultConflicting() ||
+ default_method->GetDeclaringClass() != super_method->GetDeclaringClass())) {
+ // Found a default method implementation that is new.
+ // TODO Refactor this add default methods to virtuals here and not in
+ // LinkInterfaceMethods maybe.
+ // The problem is default methods might override previously present
+ // default-method or miranda-method vtable entries from the superclass.
+ // Unfortunately we need these to be entries in this class's virtuals. We do not
+ // give these entries there until LinkInterfaceMethods so we pass this map around
+ // to let it know which vtable entries need to be updated.
+ // Make a note that vtable entry j must be updated, store what it needs to be updated
+ // to. We will allocate a virtual method slot in LinkInterfaceMethods and fix it up
+ // then.
+ default_translations->insert(
+ {j, ClassLinker::MethodTranslation::CreateTranslatedMethod(default_method)});
+ VLOG(class_linker) << "Method " << PrettyMethod(super_method)
+ << " overridden by default " << PrettyMethod(default_method)
+ << " in " << PrettyClass(klass.Get());
+ }
+ break;
+ }
}
}
}
@@ -4828,23 +4873,75 @@ bool ClassLinker::LinkVirtualMethods(
return true;
}
+// Determine if the given iface has any subinterface in the given list that declares the method
+// specified by 'target'.
+//
+// Arguments
+// - self: The thread we are running on
+// - target: A comparator that will match any method that overrides the method we are checking for
+// - iftable: The iftable we are searching for an overriding method on.
+// - ifstart: The index of the interface we are checking to see if anything overrides
+// - iface: The interface we are checking to see if anything overrides.
+// - image_pointer_size:
+// The image pointer size.
+//
+// Returns
+// - True: There is some method that matches the target comparator defined in an interface that
+// is a subtype of iface.
+// - False: There is no method that matches the target comparator in any interface that is a subtype
+// of iface.
+static bool ContainsOverridingMethodOf(Thread* self,
+ MethodNameAndSignatureComparator& target,
+ Handle<mirror::IfTable> iftable,
+ size_t ifstart,
+ Handle<mirror::Class> iface,
+ size_t image_pointer_size)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(self != nullptr);
+ DCHECK(iface.Get() != nullptr);
+ DCHECK(iftable.Get() != nullptr);
+ DCHECK_GE(ifstart, 0u);
+ DCHECK_LT(ifstart, iftable->Count());
+ DCHECK_EQ(iface.Get(), iftable->GetInterface(ifstart));
+ DCHECK(iface->IsInterface());
+
+ size_t iftable_count = iftable->Count();
+ StackHandleScope<1> hs(self);
+ MutableHandle<mirror::Class> current_iface(hs.NewHandle<mirror::Class>(nullptr));
+ for (size_t k = ifstart + 1; k < iftable_count; k++) {
+ // Skip ifstart since our current interface obviously cannot override itself.
+ current_iface.Assign(iftable->GetInterface(k));
+ size_t num_instance_methods = current_iface->NumVirtualMethods();
+ // Iterate through every method on this interface. The order does not matter so we go forwards.
+ for (size_t m = 0; m < num_instance_methods; m++) {
+ ArtMethod* current_method = current_iface->GetVirtualMethodUnchecked(m, image_pointer_size);
+ if (UNLIKELY(target.HasSameNameAndSignature(
+ current_method->GetInterfaceMethodIfProxy(image_pointer_size)))) {
+ // Check if the i'th interface is a subtype of this one.
+ if (iface->IsAssignableFrom(current_iface.Get())) {
+ return true;
+ }
+ break;
+ }
+ }
+ }
+ return false;
+}
+
// Find the default method implementation for 'interface_method' in 'klass'. Stores it into
-// out_default_method and returns true on success. If no default method was found stores nullptr
-// into out_default_method and returns true. If an error occurs (such as a default_method conflict)
-// it will fill the icce_message with an appropriate message for an IncompatibleClassChangeError,
-// which should then be thrown by the caller.
-bool ClassLinker::FindDefaultMethodImplementation(Thread* self,
- ArtMethod* target_method,
- Handle<mirror::Class> klass,
- /*out*/ArtMethod** out_default_method,
- /*out*/std::string* icce_message) const {
+// out_default_method and returns kDefaultFound on success. If no default method was found return
+// kAbstractFound and store nullptr into out_default_method. If an error occurs (such as a
+// default_method conflict) it will return kDefaultConflict.
+ClassLinker::DefaultMethodSearchResult ClassLinker::FindDefaultMethodImplementation(
+ Thread* self,
+ ArtMethod* target_method,
+ Handle<mirror::Class> klass,
+ /*out*/ArtMethod** out_default_method) const {
DCHECK(self != nullptr);
DCHECK(target_method != nullptr);
DCHECK(out_default_method != nullptr);
- DCHECK(icce_message != nullptr);
*out_default_method = nullptr;
- mirror::Class* chosen_iface = nullptr;
// We organize the interface table so that, for interface I any subinterfaces J follow it in the
// table. This lets us walk the table backwards when searching for default methods. The first one
@@ -4855,19 +4952,23 @@ bool ClassLinker::FindDefaultMethodImplementation(Thread* self,
// The order of unrelated interfaces does not matter and is not defined.
size_t iftable_count = klass->GetIfTableCount();
if (iftable_count == 0) {
- // No interfaces. We have already reset out to null so just return true.
- return true;
+ // No interfaces. We have already reset out to null so just return kAbstractFound.
+ return DefaultMethodSearchResult::kAbstractFound;
}
- StackHandleScope<1> hs(self);
+ StackHandleScope<3> hs(self);
+ MutableHandle<mirror::Class> chosen_iface(hs.NewHandle<mirror::Class>(nullptr));
MutableHandle<mirror::IfTable> iftable(hs.NewHandle(klass->GetIfTable()));
+ MutableHandle<mirror::Class> iface(hs.NewHandle<mirror::Class>(nullptr));
MethodNameAndSignatureComparator target_name_comparator(
target_method->GetInterfaceMethodIfProxy(image_pointer_size_));
// Iterates over the klass's iftable in reverse
- // We have a break at the end because size_t is unsigned.
- for (size_t k = iftable_count - 1; /* break if k == 0 at end */; --k) {
+ for (size_t k = iftable_count; k != 0; ) {
+ --k;
+
DCHECK_LT(k, iftable->Count());
- mirror::Class* iface = iftable->GetInterface(k);
+
+ iface.Assign(iftable->GetInterface(k));
size_t num_instance_methods = iface->NumVirtualMethods();
// Iterate through every method on this interface. The order does not matter so we go forwards.
for (size_t m = 0; m < num_instance_methods; m++) {
@@ -4880,31 +4981,60 @@ bool ClassLinker::FindDefaultMethodImplementation(Thread* self,
}
// The verifier should have caught the non-public method.
DCHECK(current_method->IsPublic()) << "Interface method is not public!";
- if (UNLIKELY(chosen_iface != nullptr)) {
- // We have multiple default impls of the same method. We need to check they do not
- // conflict and throw an error if they do. Conflicting means that the current iface is not
- // masked by the chosen interface.
- if (!iface->IsAssignableFrom(chosen_iface)) {
- *icce_message = StringPrintf("Conflicting default method implementations: '%s' and '%s'",
- PrettyMethod(current_method).c_str(),
- PrettyMethod(*out_default_method).c_str());
- return false;
+ if (UNLIKELY(chosen_iface.Get() != nullptr)) {
+ // We have multiple default impls of the same method. This is a potential default conflict.
+ // We need to check if this possibly conflicting method is either a superclass of the chosen
+ // default implementation or is overridden by a non-default interface method. In either case
+ // there is no conflict.
+ if (!iface->IsAssignableFrom(chosen_iface.Get()) &&
+ !ContainsOverridingMethodOf(self,
+ target_name_comparator,
+ iftable,
+ k,
+ iface,
+ image_pointer_size_)) {
+ LOG(WARNING) << "Conflicting default method implementations found: "
+ << PrettyMethod(current_method) << " and "
+ << PrettyMethod(*out_default_method) << " in class "
+ << PrettyClass(klass.Get()) << " conflict.";
+ *out_default_method = nullptr;
+ return DefaultMethodSearchResult::kDefaultConflict;
} else {
break; // Continue checking at the next interface.
}
} else {
- *out_default_method = current_method;
- chosen_iface = iface;
- // We should now finish traversing the graph to find if we have default methods that
- // conflict.
- break;
+ // chosen_iface == null
+ if (!ContainsOverridingMethodOf(self,
+ target_name_comparator,
+ iftable,
+ k,
+ iface,
+ image_pointer_size_)) {
+ // Don't set this as the chosen interface if something else is overriding it (because that
+ // other interface would be potentially chosen instead if it was default). If the other
+ // interface was abstract then we wouldn't select this interface as chosen anyway since
+ // the abstract method masks it.
+ *out_default_method = current_method;
+ chosen_iface.Assign(iface.Get());
+ // We should now finish traversing the graph to find if we have default methods that
+ // conflict.
+ } else {
+ VLOG(class_linker) << "A default method '" << PrettyMethod(current_method) << "' was "
+ << "skipped because it was overridden by an abstract method in a "
+ << "subinterface on class '" << PrettyClass(klass.Get()) << "'";
+ }
}
- }
- if (k == 0) {
break;
}
}
- return true;
+ if (*out_default_method != nullptr) {
+ VLOG(class_linker) << "Default method '" << PrettyMethod(*out_default_method) << "' selected "
+ << "as the implementation for '" << PrettyMethod(target_method) << "' "
+ << "in '" << PrettyClass(klass.Get()) << "'";
+ return DefaultMethodSearchResult::kDefaultFound;
+ } else {
+ return DefaultMethodSearchResult::kAbstractFound;
+ }
}
// Sets imt_ref appropriately for LinkInterfaceMethods.
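
A toy model of the three-way result the rewritten search returns; CandidateSketch and masked_by_chosen are simplifications of the reverse iftable walk and the IsAssignableFrom/ContainsOverridingMethodOf checks, not ART's implementation. The first (most-derived) default becomes the choice, a later unmasked default is a conflict, and no default at all leaves the method abstract.

#include <cstdio>
#include <vector>

enum class DefaultMethodSearchResult { kDefaultFound, kAbstractFound, kDefaultConflict };

// A candidate default implementation as seen while walking interfaces from most
// derived to least derived.
struct CandidateSketch {
  const char* iface;
  bool masked_by_chosen;  // True if the already-chosen interface subsumes this candidate.
};

DefaultMethodSearchResult FindDefaultSketch(const std::vector<CandidateSketch>& candidates,
                                            const char** chosen_out) {
  const char* chosen = nullptr;
  for (const CandidateSketch& c : candidates) {
    if (chosen == nullptr) {
      chosen = c.iface;                    // First (most-derived) default becomes the choice.
    } else if (!c.masked_by_chosen) {
      *chosen_out = nullptr;               // A second, unmasked default means a conflict.
      return DefaultMethodSearchResult::kDefaultConflict;
    }
  }
  *chosen_out = chosen;
  return chosen != nullptr ? DefaultMethodSearchResult::kDefaultFound
                           : DefaultMethodSearchResult::kAbstractFound;
}

int main() {
  const char* chosen = nullptr;
  std::vector<CandidateSketch> candidates = {{"J (extends I)", false}, {"I", true}};
  DefaultMethodSearchResult r = FindDefaultSketch(candidates, &chosen);
  std::printf("result=%d chosen=%s\n", static_cast<int>(r), chosen != nullptr ? chosen : "none");
  return 0;
}
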
@@ -4912,7 +5042,7 @@ bool ClassLinker::FindDefaultMethodImplementation(Thread* self,
// Otherwise it will set the conflict method which will figure out which method to use during
// runtime.
static void SetIMTRef(ArtMethod* unimplemented_method,
- ArtMethod* conflict_method,
+ ArtMethod* imt_conflict_method,
size_t image_pointer_size,
ArtMethod* current_method,
/*out*/ArtMethod** imt_ref)
@@ -4920,7 +5050,7 @@ static void SetIMTRef(ArtMethod* unimplemented_method,
// Place method in imt if entry is empty, place conflict otherwise.
if (*imt_ref == unimplemented_method) {
*imt_ref = current_method;
- } else if (*imt_ref != conflict_method) {
+ } else if (*imt_ref != imt_conflict_method) {
// If we are not a conflict and we have the same signature and name as the imt
// entry, it must be that we overwrote a superclass vtable entry.
MethodNameAndSignatureComparator imt_comparator(
@@ -4929,7 +5059,7 @@ static void SetIMTRef(ArtMethod* unimplemented_method,
current_method->GetInterfaceMethodIfProxy(image_pointer_size))) {
*imt_ref = current_method;
} else {
- *imt_ref = conflict_method;
+ *imt_ref = imt_conflict_method;
}
}
}
@@ -5136,10 +5266,23 @@ bool ClassLinker::SetupInterfaceLookupTable(Thread* self, Handle<mirror::Class>
return true;
}
+// Finds the method with a name/signature that matches cmp in the given list of methods. The list of
+// methods must be unique.
+static ArtMethod* FindSameNameAndSignature(MethodNameAndSignatureComparator& cmp,
+ const ScopedArenaVector<ArtMethod*>& list)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ for (ArtMethod* method : list) {
+ if (cmp.HasSameNameAndSignature(method)) {
+ return method;
+ }
+ }
+ return nullptr;
+}
+
bool ClassLinker::LinkInterfaceMethods(
Thread* self,
Handle<mirror::Class> klass,
- const std::unordered_map<size_t, ArtMethod*>& default_translations,
+ const std::unordered_map<size_t, ClassLinker::MethodTranslation>& default_translations,
ArtMethod** out_imt) {
StackHandleScope<3> hs(self);
Runtime* const runtime = Runtime::Current();
@@ -5162,12 +5305,14 @@ bool ClassLinker::LinkInterfaceMethods(
// Use the linear alloc pool since this one is in the low 4gb for the compiler.
ArenaStack stack(runtime->GetLinearAlloc()->GetArenaPool());
ScopedArenaAllocator allocator(&stack);
+
+ ScopedArenaVector<ArtMethod*> default_conflict_methods(allocator.Adapter());
ScopedArenaVector<ArtMethod*> miranda_methods(allocator.Adapter());
ScopedArenaVector<ArtMethod*> default_methods(allocator.Adapter());
MutableHandle<mirror::PointerArray> vtable(hs.NewHandle(klass->GetVTableDuringLinking()));
ArtMethod* const unimplemented_method = runtime->GetImtUnimplementedMethod();
- ArtMethod* const conflict_method = runtime->GetImtConflictMethod();
+ ArtMethod* const imt_conflict_method = runtime->GetImtConflictMethod();
// Copy the IMT from the super class if possible.
bool extend_super_iftable = false;
if (has_superclass) {
@@ -5203,8 +5348,8 @@ bool ClassLinker::LinkInterfaceMethods(
auto** imt_ref = &out_imt[imt_index];
if (*imt_ref == unimplemented_method) {
*imt_ref = method;
- } else if (*imt_ref != conflict_method) {
- *imt_ref = conflict_method;
+ } else if (*imt_ref != imt_conflict_method) {
+ *imt_ref = imt_conflict_method;
}
}
}
@@ -5239,7 +5384,16 @@ bool ClassLinker::LinkInterfaceMethods(
auto* old_cause = self->StartAssertNoThreadSuspension(
"Copying ArtMethods for LinkInterfaceMethods");
- for (size_t i = 0; i < ifcount; ++i) {
+ // Going in reverse to ensure that we will hit abstract methods that override defaults before the
+ // defaults. This means we don't need to do any trickery when creating the Miranda methods, since
+ // they will already be null. This has the additional benefit that the declarer of a miranda
+ // method will actually declare an abstract method.
+ for (size_t i = ifcount; i != 0; ) {
+ --i;
+
+ DCHECK_GE(i, 0u);
+ DCHECK_LT(i, ifcount);
+
size_t num_methods = iftable->GetInterface(i)->NumVirtualMethods();
if (num_methods > 0) {
StackHandleScope<2> hs2(self);
@@ -5250,6 +5404,11 @@ bool ClassLinker::LinkInterfaceMethods(
LengthPrefixedArray<ArtMethod>* input_virtual_methods = nullptr;
Handle<mirror::PointerArray> input_vtable_array = NullHandle<mirror::PointerArray>();
int32_t input_array_length = 0;
+ // TODO Cleanup Needed: In the presence of default methods this optimization is rather dirty
+ // and confusing. Default methods should always look through all the superclasses
+ // because they are the last choice of an implementation. We get around this by looking
+ // at the super-classes iftable methods (copied into method_array previously) when we are
+ // looking for the implementation of a super-interface method but that is rather dirty.
if (super_interface) {
// We are overwriting a super class interface, try to use only virtual methods instead of the
// whole vtable.
@@ -5279,8 +5438,7 @@ bool ClassLinker::LinkInterfaceMethods(
//
// To find defaults we need to do the same but also go over interfaces.
bool found_impl = false;
- ArtMethod* default_impl = nullptr;
- bool found_default_impl = false;
+ ArtMethod* vtable_impl = nullptr;
for (int32_t k = input_array_length - 1; k >= 0; --k) {
ArtMethod* vtable_method = input_virtual_methods != nullptr ?
&input_virtual_methods->At(k, method_size, method_alignment) :
@@ -5297,77 +5455,138 @@ bool ClassLinker::LinkInterfaceMethods(
"Method '%s' implementing interface method '%s' is not public",
PrettyMethod(vtable_method).c_str(), PrettyMethod(interface_method).c_str());
return false;
- } else if (vtable_method->IsDefault()) {
+ } else if (UNLIKELY(vtable_method->IsOverridableByDefaultMethod())) {
// We might have a newer, better, default method for this, so we just skip it. If we
// are still using this we will select it again when scanning for default methods. To
// obviate the need to copy the method again we will make a note that we already found
// a default here.
// TODO This should be much cleaner.
- found_default_impl = true;
- default_impl = vtable_method;
+ vtable_impl = vtable_method;
break;
} else {
found_impl = true;
+ method_array->SetElementPtrSize(j, vtable_method, image_pointer_size_);
+ // Place method in imt if entry is empty, place conflict otherwise.
+ SetIMTRef(unimplemented_method,
+ imt_conflict_method,
+ image_pointer_size_,
+ vtable_method,
+ /*out*/imt_ptr);
+ break;
}
- method_array->SetElementPtrSize(j, vtable_method, image_pointer_size_);
- // Place method in imt if entry is empty, place conflict otherwise.
- SetIMTRef(unimplemented_method,
- conflict_method,
- image_pointer_size_,
- vtable_method,
- /*out*/imt_ptr);
- break;
}
}
- // We should only search for default implementations when the class does not implement the
- // method directly and either (1) the interface is newly implemented on this class and not
- // on any of its superclasses, (2) the superclass's implementation is a default method, or
- // (3) the superclass does not have an implementation.
- if (!found_impl && (!super_interface ||
- method_array->GetElementPtrSize<ArtMethod*>(j, image_pointer_size_)
- ->IsOverridableByDefaultMethod())) {
- ArtMethod* current_method = nullptr;
- std::string icce_message;
- if (!FindDefaultMethodImplementation(self,
- interface_method,
- klass,
- /*out*/&current_method,
- /*out*/&icce_message)) {
- // There was a conflict with default method implementations.
- self->EndAssertNoThreadSuspension(old_cause);
- // TODO This should actually be thrown when we attempt to invoke this method.
- ThrowIncompatibleClassChangeError(klass.Get(), "%s", icce_message.c_str());
- return false;
- } else if (current_method != nullptr) {
- if (found_default_impl &&
- current_method->GetDeclaringClass() == default_impl->GetDeclaringClass()) {
+ // Continue on to the next method if we are done.
+ if (LIKELY(found_impl)) {
+ continue;
+ } else if (LIKELY(super_interface)) {
+ // Don't look for a default implementation when the super-method is implemented directly
+ // by the class.
+ //
+ // See if we can use the superclasses method and skip searching everything else.
+ // Note: !found_impl && super_interface
+ CHECK(extend_super_iftable);
+ // If this is a super_interface method it is possible we shouldn't override it because a
+ // superclass could have implemented it directly. We get the method the superclass used
+ // to implement this to know if we can override it with a default method. Doing this is
+ // safe since we know that the super_iftable is filled in so we can simply pull it from
+ // there. We don't bother if this is not a super-classes interface since in that case we
+ // have scanned the entire vtable anyway and would have found it.
+ // TODO This is rather dirty but it is faster than searching through the entire vtable
+ // every time.
+ ArtMethod* supers_method =
+ method_array->GetElementPtrSize<ArtMethod*>(j, image_pointer_size_);
+ DCHECK(supers_method != nullptr);
+ DCHECK(interface_name_comparator.HasSameNameAndSignature(supers_method));
+ if (!supers_method->IsOverridableByDefaultMethod()) {
+ // The method is not overridable by a default method (i.e. it is directly implemented
+ // in some class). Therefore move onto the next interface method.
+ continue;
+ }
+ }
+ // If we haven't found it yet we should search through the interfaces for default methods.
+ ArtMethod* current_method = nullptr;
+ switch (FindDefaultMethodImplementation(self,
+ interface_method,
+ klass,
+ /*out*/&current_method)) {
+ case DefaultMethodSearchResult::kDefaultConflict: {
+ // Default method conflict.
+ DCHECK(current_method == nullptr);
+ ArtMethod* default_conflict_method = nullptr;
+ if (vtable_impl != nullptr && vtable_impl->IsDefaultConflicting()) {
+ // We can reuse the method from the superclass, don't bother adding it to virtuals.
+ default_conflict_method = vtable_impl;
+ } else {
+ // See if we already have a conflict method for this method.
+ ArtMethod* preexisting_conflict = FindSameNameAndSignature(interface_name_comparator,
+ default_conflict_methods);
+ if (LIKELY(preexisting_conflict != nullptr)) {
+ // We already have another conflict we can reuse.
+ default_conflict_method = preexisting_conflict;
+ } else {
+ // Create a new conflict method for this to use.
+ default_conflict_method =
+ reinterpret_cast<ArtMethod*>(allocator.Alloc(method_size));
+ new(default_conflict_method) ArtMethod(interface_method, image_pointer_size_);
+ default_conflict_methods.push_back(default_conflict_method);
+ }
+ }
+ current_method = default_conflict_method;
+ break;
+ }
+ case DefaultMethodSearchResult::kDefaultFound: {
+ DCHECK(current_method != nullptr);
+ // Found a default method.
+ if (vtable_impl != nullptr &&
+ current_method->GetDeclaringClass() == vtable_impl->GetDeclaringClass()) {
// We found a default method but it was the same one we already have from our
// superclass. Don't bother adding it to our vtable again.
- current_method = default_impl;
+ current_method = vtable_impl;
} else {
- // We found a default method implementation and there were no conflicts.
- // Save the default method. We need to add it to the vtable.
- default_methods.push_back(current_method);
+ // Only record this default method if it is new to save space.
+ ArtMethod* old = FindSameNameAndSignature(interface_name_comparator, default_methods);
+ if (old == nullptr) {
+ // We found a default method implementation and there were no conflicts.
+ // Save the default method. We need to add it to the vtable.
+ default_methods.push_back(current_method);
+ } else {
+ CHECK(old == current_method) << "Multiple default implementations selected!";
+ }
}
- method_array->SetElementPtrSize(j, current_method, image_pointer_size_);
- SetIMTRef(unimplemented_method,
- conflict_method,
- image_pointer_size_,
- current_method,
- /*out*/imt_ptr);
- found_impl = true;
+ break;
}
- }
- if (!found_impl && !super_interface) {
- // It is defined in this class or any of its subclasses.
- ArtMethod* miranda_method = nullptr;
- for (auto& mir_method : miranda_methods) {
- if (interface_name_comparator.HasSameNameAndSignature(mir_method)) {
- miranda_method = mir_method;
- break;
+ case DefaultMethodSearchResult::kAbstractFound: {
+ DCHECK(current_method == nullptr);
+ // Abstract method masks all defaults.
+ if (vtable_impl != nullptr &&
+ vtable_impl->IsAbstract() &&
+ !vtable_impl->IsDefaultConflicting()) {
+            // We need to make this an abstract method but the version in the vtable already is, so
+ // don't do anything.
+ current_method = vtable_impl;
}
+ break;
}
+ }
+ if (current_method != nullptr) {
+ // We found a default method implementation. Record it in the iftable and IMT.
+ method_array->SetElementPtrSize(j, current_method, image_pointer_size_);
+ SetIMTRef(unimplemented_method,
+ imt_conflict_method,
+ image_pointer_size_,
+ current_method,
+ /*out*/imt_ptr);
+ } else if (!super_interface) {
+      // We could not find an implementation for this method and, since it is a brand new
+      // interface, we searched the entire vtable (and all default methods) for an implementation
+ // but couldn't find one. We therefore need to make a miranda method.
+ //
+ // Find out if there is already a miranda method we can use.
+ ArtMethod* miranda_method = FindSameNameAndSignature(interface_name_comparator,
+ miranda_methods);
if (miranda_method == nullptr) {
+ DCHECK(interface_method->IsAbstract()) << PrettyMethod(interface_method);
miranda_method = reinterpret_cast<ArtMethod*>(allocator.Alloc(method_size));
CHECK(miranda_method != nullptr);
// Point the interface table at a phantom slot.
@@ -5379,10 +5598,15 @@ bool ClassLinker::LinkInterfaceMethods(
}
}
}
- if (!miranda_methods.empty() || !default_methods.empty()) {
+ if (!miranda_methods.empty() || !default_methods.empty() || !default_conflict_methods.empty()) {
+ VLOG(class_linker) << PrettyClass(klass.Get()) << ": miranda_methods=" << miranda_methods.size()
+ << " default_methods=" << default_methods.size()
+ << " default_conflict_methods=" << default_conflict_methods.size();
const size_t old_method_count = klass->NumVirtualMethods();
- const size_t new_method_count =
- old_method_count + miranda_methods.size() + default_methods.size();
+ const size_t new_method_count = old_method_count +
+ miranda_methods.size() +
+ default_methods.size() +
+ default_conflict_methods.size();
// Attempt to realloc to save RAM if possible.
LengthPrefixedArray<ArtMethod>* old_virtuals = klass->GetVirtualMethodsPtr();
    // The Realloced virtual methods aren't visible from the class roots, so there is no issue
@@ -5440,15 +5664,32 @@ bool ClassLinker::LinkInterfaceMethods(
for (ArtMethod* def_method : default_methods) {
ArtMethod& new_method = *out;
new_method.CopyFrom(def_method, image_pointer_size_);
- new_method.SetAccessFlags(new_method.GetAccessFlags() | kAccDefault);
// Clear the preverified flag if it is present. Since this class hasn't been verified yet it
// shouldn't have methods that are preverified.
// TODO This is rather arbitrary. We should maybe support classes where only some of its
// methods are preverified.
- new_method.SetAccessFlags(new_method.GetAccessFlags() & ~kAccPreverified);
+ new_method.SetAccessFlags((new_method.GetAccessFlags() | kAccDefault) & ~kAccPreverified);
move_table.emplace(def_method, &new_method);
++out;
}
+ for (ArtMethod* conf_method : default_conflict_methods) {
+ ArtMethod& new_method = *out;
+ new_method.CopyFrom(conf_method, image_pointer_size_);
+ // This is a type of default method (there are default method impls, just a conflict) so mark
+      // this as a default, non-abstract method, since that's what it is. Also clear the preverified
+      // bit; since this class hasn't been verified yet, it shouldn't have methods that are
+ // preverified.
+ constexpr uint32_t kSetFlags = kAccDefault | kAccDefaultConflict;
+ constexpr uint32_t kMaskFlags = ~(kAccAbstract | kAccPreverified);
+ new_method.SetAccessFlags((new_method.GetAccessFlags() | kSetFlags) & kMaskFlags);
+ DCHECK(new_method.IsDefaultConflicting());
+ // The actual method might or might not be marked abstract since we just copied it from a
+      // (possibly default) interface method. We need to set its entry point to be the bridge so that
+ // the compiler will not invoke the implementation of whatever method we copied from.
+ EnsureThrowsInvocationError(&new_method);
+ move_table.emplace(conf_method, &new_method);
+ ++out;
+ }
virtuals->SetSize(new_method_count);
UpdateClassVirtualMethods(klass.Get(), virtuals);
// Done copying methods, they are all roots in the class now, so we can end the no thread
@@ -5456,8 +5697,10 @@ bool ClassLinker::LinkInterfaceMethods(
self->EndAssertNoThreadSuspension(old_cause);
const size_t old_vtable_count = vtable->GetLength();
- const size_t new_vtable_count =
- old_vtable_count + miranda_methods.size() + default_methods.size();
+ const size_t new_vtable_count = old_vtable_count +
+ miranda_methods.size() +
+ default_methods.size() +
+ default_conflict_methods.size();
miranda_methods.clear();
vtable.Assign(down_cast<mirror::PointerArray*>(vtable->CopyOf(self, new_vtable_count)));
if (UNLIKELY(vtable.Get() == nullptr)) {
@@ -5482,9 +5725,27 @@ bool ClassLinker::LinkInterfaceMethods(
auto translation_it = default_translations.find(i);
bool found_translation = false;
if (translation_it != default_translations.end()) {
- size_t vtable_index;
- std::tie(vtable_index, translated_method) = *translation_it;
- DCHECK_EQ(vtable_index, i);
+ if (translation_it->second.IsInConflict()) {
+ // Find which conflict method we are to use for this method.
+ MethodNameAndSignatureComparator old_method_comparator(
+ translated_method->GetInterfaceMethodIfProxy(image_pointer_size_));
+ ArtMethod* new_conflict_method = FindSameNameAndSignature(old_method_comparator,
+ default_conflict_methods);
+ CHECK(new_conflict_method != nullptr) << "Expected a conflict method!";
+ translated_method = new_conflict_method;
+ } else if (translation_it->second.IsAbstract()) {
+ // Find which miranda method we are to use for this method.
+ MethodNameAndSignatureComparator old_method_comparator(
+ translated_method->GetInterfaceMethodIfProxy(image_pointer_size_));
+ ArtMethod* miranda_method = FindSameNameAndSignature(old_method_comparator,
+ miranda_methods);
+ DCHECK(miranda_method != nullptr);
+ translated_method = miranda_method;
+ } else {
+ // Normal default method (changed from an older default or abstract interface method).
+ DCHECK(translation_it->second.IsTranslation());
+ translated_method = translation_it->second.GetTranslation();
+ }
found_translation = true;
}
DCHECK(translated_method != nullptr);
@@ -6191,8 +6452,8 @@ void ClassLinker::SetEntryPointsToInterpreter(ArtMethod* method) const {
void ClassLinker::DumpForSigQuit(std::ostream& os) {
ScopedObjectAccess soa(Thread::Current());
- if (dex_cache_image_class_lookup_required_) {
- MoveImageClassesToClassTable();
+ if (dex_cache_boot_image_class_lookup_required_) {
+ AddBootImageClassesToClassTable();
}
ReaderMutexLock mu(soa.Self(), *Locks::classlinker_classes_lock_);
os << "Zygote loaded classes=" << NumZygoteClasses() << " post zygote classes="
@@ -6229,8 +6490,8 @@ size_t ClassLinker::NumNonZygoteClasses() const {
}
size_t ClassLinker::NumLoadedClasses() {
- if (dex_cache_image_class_lookup_required_) {
- MoveImageClassesToClassTable();
+ if (dex_cache_boot_image_class_lookup_required_) {
+ AddBootImageClassesToClassTable();
}
ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
  // Only return non zygote classes since these are the ones which apps care about.
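
The LinkInterfaceMethods changes above hinge on the three-way result of FindDefaultMethodImplementation. A condensed sketch of how each outcome is handled (names such as self, interface_method and klass come from the surrounding code in the hunk; this is an illustration, not a drop-in excerpt of the patch):

    ArtMethod* current_method = nullptr;
    switch (FindDefaultMethodImplementation(self, interface_method, klass,
                                            /*out*/&current_method)) {
      case DefaultMethodSearchResult::kDefaultFound:
        // current_method is a usable default: record it (deduplicated via default_methods),
        // write it into the iftable slot and update the IMT.
        break;
      case DefaultMethodSearchResult::kDefaultConflict:
        // current_method is null: reuse or allocate a conflict method whose entry point throws
        // IncompatibleClassChangeError when it is invoked.
        break;
      case DefaultMethodSearchResult::kAbstractFound:
        // current_method is null: the slot stays abstract (a miranda method for new interfaces).
        break;
    }
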
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index cae62ef50f..a72b58602f 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -487,10 +487,17 @@ class ClassLinker {
return class_roots;
}
- // Move all of the image classes into the class table for faster lookups.
- void MoveImageClassesToClassTable()
+ // Move all of the boot image classes into the class table for faster lookups.
+ void AddBootImageClassesToClassTable()
REQUIRES(!Locks::classlinker_classes_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Add image classes to the class table.
+ void AddImageClassesToClassTable(gc::space::ImageSpace* image_space,
+ mirror::ClassLoader* class_loader)
+ REQUIRES(!Locks::classlinker_classes_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
// Move the class table to the pre-zygote table to reduce memory usage. This works by ensuring
// that no more classes are ever added to the pre zygote table which makes it that the pages
// always remain shared dirty instead of private dirty.
@@ -544,6 +551,17 @@ class ClassLinker {
REQUIRES(!Locks::classlinker_classes_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
+ struct DexCacheData {
+ // Weak root to the DexCache. Note: Do not decode this unnecessarily or else class unloading may
+ // not work properly.
+ jweak weak_root;
+    // The following two fields cache the DexCache's fields and are here to avoid unnecessary
+    // jweak decodes that trigger read barriers (which would mark the objects alive unnecessarily
+    // and interfere with class unloading).
+ const DexFile* dex_file;
+ GcRoot<mirror::Class>* resolved_types;
+ };
+
private:
struct ClassLoaderData {
jweak weak_root; // Weak root to enable class unloading.
@@ -721,6 +739,83 @@ class ClassLinker {
ArtMethod** out_imt)
SHARED_REQUIRES(Locks::mutator_lock_);
+  // Does whatever is needed to make sure that the compiler will not generate a direct invoke to
+  // this method. Should only be called on non-invokable methods.
+ void EnsureThrowsInvocationError(ArtMethod* method)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // A wrapper class representing the result of a method translation used for linking methods and
+  // updating superclass default methods. For each method in a class's vtable there are 4 states it
+ // could be in:
+ // 1) No translation is necessary. In this case there is no MethodTranslation object for it. This
+ // is the standard case and is true when the method is not overridable by a default method,
+ // the class defines a concrete implementation of the method, the default method implementation
+ // remains the same, or an abstract method stayed abstract.
+ // 2) The method must be translated to a different default method. We note this with
+ // CreateTranslatedMethod.
+ // 3) The method must be replaced with a conflict method. This happens when a superclass
+ // implements an interface with a default method and this class implements an unrelated
+ // interface that also defines that default method. We note this with CreateConflictingMethod.
+ // 4) The method must be replaced with an abstract miranda method. This happens when a superclass
+ // implements an interface with a default method and this class implements a subinterface of
+ // the superclass's interface which declares the default method abstract. We note this with
+ // CreateAbstractMethod.
+ //
+ // When a method translation is unnecessary (case #1), we don't put it into the
+  // default_translations map. So an instance of MethodTranslation must be in one of #2-#4.
+ class MethodTranslation {
+ public:
+ // This slot must become a default conflict method.
+ static MethodTranslation CreateConflictingMethod() {
+ return MethodTranslation(Type::kConflict, /*translation*/nullptr);
+ }
+
+ // This slot must become an abstract method.
+ static MethodTranslation CreateAbstractMethod() {
+ return MethodTranslation(Type::kAbstract, /*translation*/nullptr);
+ }
+
+ // Use the given method as the current value for this vtable slot during translation.
+ static MethodTranslation CreateTranslatedMethod(ArtMethod* new_method) {
+ return MethodTranslation(Type::kTranslation, new_method);
+ }
+
+ // Returns true if this is a method that must become a conflict method.
+ bool IsInConflict() const {
+ return type_ == Type::kConflict;
+ }
+
+ // Returns true if this is a method that must become an abstract method.
+ bool IsAbstract() const {
+ return type_ == Type::kAbstract;
+ }
+
+ // Returns true if this is a method that must become a different method.
+ bool IsTranslation() const {
+ return type_ == Type::kTranslation;
+ }
+
+ // Get the translated version of this method.
+ ArtMethod* GetTranslation() const {
+ DCHECK(IsTranslation());
+ DCHECK(translation_ != nullptr);
+ return translation_;
+ }
+
+ private:
+ enum class Type {
+ kTranslation,
+ kConflict,
+ kAbstract,
+ };
+
+ MethodTranslation(Type type, ArtMethod* translation)
+ : translation_(translation), type_(type) {}
+
+ ArtMethod* const translation_;
+ const Type type_;
+ };
+
// Links the virtual methods for the given class and records any default methods that will need to
// be updated later.
//
@@ -737,9 +832,10 @@ class ClassLinker {
  //     scan, we therefore store the vtable indices that might need to be
// updated with the method they will turn into.
// TODO This whole default_translations thing is very dirty. There should be a better way.
- bool LinkVirtualMethods(Thread* self,
- Handle<mirror::Class> klass,
- /*out*/std::unordered_map<size_t, ArtMethod*>* default_translations)
+ bool LinkVirtualMethods(
+ Thread* self,
+ Handle<mirror::Class> klass,
+ /*out*/std::unordered_map<size_t, MethodTranslation>* default_translations)
SHARED_REQUIRES(Locks::mutator_lock_);
// Sets up the interface lookup table (IFTable) in the correct order to allow searching for
@@ -749,6 +845,13 @@ class ClassLinker {
Handle<mirror::ObjectArray<mirror::Class>> interfaces)
SHARED_REQUIRES(Locks::mutator_lock_);
+
+ enum class DefaultMethodSearchResult {
+ kDefaultFound,
+ kAbstractFound,
+ kDefaultConflict
+ };
+
// Find the default method implementation for 'interface_method' in 'klass', if one exists.
//
// Arguments:
@@ -756,31 +859,31 @@ class ClassLinker {
// * target_method - The method we are trying to find a default implementation for.
// * klass - The class we are searching for a definition of target_method.
// * out_default_method - The pointer we will store the found default method to on success.
- // * icce_message - A string we will store an appropriate IncompatibleClassChangeError message
- // into in case of failure. Note we must do it this way since we do not know
- // whether we can allocate the exception object, which could cause us to go to
- // sleep.
//
// Return value:
- // * True - There were no conflicting method implementations found in the class while searching
- // for target_method. The default method implementation is stored into out_default_method
- // if it was found. Otherwise *out_default_method will be set to nullptr.
- // * False - Conflicting method implementations were found when searching for target_method. The
- // value of *out_default_method is undefined and *icce_message is a string that should
- // be used to create an IncompatibleClassChangeError as soon as possible.
- bool FindDefaultMethodImplementation(Thread* self,
- ArtMethod* target_method,
- Handle<mirror::Class> klass,
- /*out*/ArtMethod** out_default_method,
- /*out*/std::string* icce_message) const
+ // * kDefaultFound - There were no conflicting method implementations found in the class while
+ // searching for target_method. The default method implementation is stored into
+ // out_default_method.
+ // * kAbstractFound - There were no conflicting method implementations found in the class while
+ // searching for target_method but no default implementation was found either.
+ // out_default_method is set to null and the method should be considered not
+ // implemented.
+ // * kDefaultConflict - Conflicting method implementations were found when searching for
+ // target_method. The value of *out_default_method is null.
+ DefaultMethodSearchResult FindDefaultMethodImplementation(
+ Thread* self,
+ ArtMethod* target_method,
+ Handle<mirror::Class> klass,
+ /*out*/ArtMethod** out_default_method) const
SHARED_REQUIRES(Locks::mutator_lock_);
// Sets the imt entries and fixes up the vtable for the given class by linking all the interface
// methods. See LinkVirtualMethods for an explanation of what default_translations is.
- bool LinkInterfaceMethods(Thread* self,
- Handle<mirror::Class> klass,
- const std::unordered_map<size_t, ArtMethod*>& default_translations,
- ArtMethod** out_imt)
+ bool LinkInterfaceMethods(
+ Thread* self,
+ Handle<mirror::Class> klass,
+ const std::unordered_map<size_t, MethodTranslation>& default_translations,
+ ArtMethod** out_imt)
SHARED_REQUIRES(Locks::mutator_lock_);
bool LinkStaticFields(Thread* self, Handle<mirror::Class> klass, size_t* class_size)
@@ -810,7 +913,8 @@ class ClassLinker {
size_t GetDexCacheCount() SHARED_REQUIRES(Locks::mutator_lock_, dex_lock_) {
return dex_caches_.size();
}
- const std::list<jweak>& GetDexCaches() SHARED_REQUIRES(Locks::mutator_lock_, dex_lock_) {
+ const std::list<DexCacheData>& GetDexCachesData()
+ SHARED_REQUIRES(Locks::mutator_lock_, dex_lock_) {
return dex_caches_;
}
@@ -824,7 +928,7 @@ class ClassLinker {
void EnsurePreverifiedMethods(Handle<mirror::Class> c)
SHARED_REQUIRES(Locks::mutator_lock_);
- mirror::Class* LookupClassFromImage(const char* descriptor)
+ mirror::Class* LookupClassFromBootImage(const char* descriptor)
SHARED_REQUIRES(Locks::mutator_lock_);
// Returns null if not found.
@@ -873,9 +977,9 @@ class ClassLinker {
std::vector<std::unique_ptr<const DexFile>> opened_dex_files_;
mutable ReaderWriterMutex dex_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
- // JNI weak globals to allow dex caches to get unloaded. We lazily delete weak globals when we
- // register new dex files.
- std::list<jweak> dex_caches_ GUARDED_BY(dex_lock_);
+ // JNI weak globals and side data to allow dex caches to get unloaded. We lazily delete weak
+ // globals when we register new dex files.
+ std::list<DexCacheData> dex_caches_ GUARDED_BY(dex_lock_);
// This contains the class loaders which have class tables. It is populated by
// InsertClassTableForClassLoader.
@@ -888,8 +992,8 @@ class ClassLinker {
// New class roots, only used by CMS since the GC needs to mark these in the pause.
std::vector<GcRoot<mirror::Class>> new_class_roots_ GUARDED_BY(Locks::classlinker_classes_lock_);
- // Do we need to search dex caches to find image classes?
- bool dex_cache_image_class_lookup_required_;
+ // Do we need to search dex caches to find boot image classes?
+ bool dex_cache_boot_image_class_lookup_required_;
// Number of times we've searched dex caches for a class. After a certain number of misses we move
// the classes into the class_table_ to avoid dex cache based searches.
Atomic<uint32_t> failed_dex_cache_class_lookups_;
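
A rough sketch of how the new MethodTranslation type threads information from LinkVirtualMethods into LinkInterfaceMethods, assuming it runs inside ClassLinker where the nested class is visible (vtable_index and the local map are illustrative; the authoritative consumption code is the class_linker.cc hunk above):

    // Producer side (LinkVirtualMethods): record what a vtable slot must become.
    std::unordered_map<size_t, MethodTranslation> default_translations;
    size_t vtable_index = 0;  // Placeholder slot index for illustration.
    default_translations.emplace(vtable_index, MethodTranslation::CreateConflictingMethod());

    // Consumer side (LinkInterfaceMethods): apply the recorded translation to that slot.
    auto it = default_translations.find(vtable_index);
    if (it != default_translations.end()) {
      if (it->second.IsInConflict()) {
        // Replace the slot with the synthesized default-conflict method.
      } else if (it->second.IsAbstract()) {
        // Replace the slot with the corresponding miranda method.
      } else {
        ArtMethod* new_method = it->second.GetTranslation();
        // Use new_method (the translated default method) for this slot.
      }
    }
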
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index a474ae6361..6da2bef214 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -174,13 +174,6 @@ class CheckJniAbortCatcher {
DISALLOW_COPY_AND_ASSIGN(CheckJniAbortCatcher);
};
-// TODO: When read barrier works with the compiler, get rid of this.
-#define TEST_DISABLED_FOR_READ_BARRIER() \
- if (kUseReadBarrier) { \
- printf("WARNING: TEST DISABLED FOR READ BARRIER\n"); \
- return; \
- }
-
#define TEST_DISABLED_FOR_MIPS() \
if (kRuntimeISA == kMips) { \
printf("WARNING: TEST DISABLED FOR MIPS\n"); \
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index de692d1368..d68b463950 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -242,6 +242,15 @@ void ThrowIncompatibleClassChangeError(mirror::Class* referrer, const char* fmt,
va_end(args);
}
+void ThrowIncompatibleClassChangeErrorForMethodConflict(ArtMethod* method) {
+ DCHECK(method != nullptr);
+ ThrowException("Ljava/lang/IncompatibleClassChangeError;",
+ /*referrer*/nullptr,
+ StringPrintf("Conflicting default method implementations %s",
+ PrettyMethod(method).c_str()).c_str());
+}
+
+
// IOException
void ThrowIOException(const char* fmt, ...) {
diff --git a/runtime/common_throws.h b/runtime/common_throws.h
index 2402e6f7a0..2a0934fb5b 100644
--- a/runtime/common_throws.h
+++ b/runtime/common_throws.h
@@ -120,6 +120,9 @@ void ThrowIncompatibleClassChangeError(mirror::Class* referrer, const char* fmt,
__attribute__((__format__(__printf__, 2, 3)))
SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+void ThrowIncompatibleClassChangeErrorForMethodConflict(ArtMethod* method)
+ SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+
// IOException
void ThrowIOException(const char* fmt, ...) __attribute__((__format__(__printf__, 1, 2)))
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index e7877b2e78..1e44f509f1 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -722,6 +722,7 @@ class DexFile {
//
const CodeItem* GetCodeItem(const uint32_t code_off) const {
+    DCHECK_LT(code_off, size_) << "Code item offset larger than maximum allowed offset";
if (code_off == 0) {
return nullptr; // native or abstract method
} else {
diff --git a/runtime/entrypoints/quick/quick_entrypoints.h b/runtime/entrypoints/quick/quick_entrypoints.h
index 3d3f7a1bdb..27865e3dc4 100644
--- a/runtime/entrypoints/quick/quick_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_entrypoints.h
@@ -31,12 +31,12 @@ namespace art {
namespace mirror {
class Array;
class Class;
+template<class MirrorType> class CompressedReference;
class Object;
-template<class MirrorType>
-class CompressedReference;
} // namespace mirror
class ArtMethod;
+template<class MirrorType> class GcRoot;
class Thread;
// Pointers to functions that are called by quick compiler generated code via thread-local storage.
@@ -72,9 +72,14 @@ extern void ReadBarrierJni(mirror::CompressedReference<mirror::Object>* handle_o
Thread* self)
NO_THREAD_SAFETY_ANALYSIS HOT_ATTR;
+
// Read barrier entrypoints.
-// Compilers for ARM, ARM64, MIPS, MIPS64 can insert a call to this function directly.
-// For x86 and x86_64, compilers need a wrapper assembly function, to handle mismatch in ABI.
+//
+// Compilers for ARM, ARM64, MIPS, MIPS64 can insert a call to these
+// functions directly. For x86 and x86-64, compilers need a wrapper
+// assembly function to handle the mismatch in ABI.
+
+// Read barrier entrypoint for heap references.
// This is the read barrier slow path for instance and static fields and reference-type arrays.
// TODO: Currently the read barrier does not have a fast path for compilers to directly generate.
// Ideally the slow path should only take one parameter "ref".
@@ -82,6 +87,10 @@ extern "C" mirror::Object* artReadBarrierSlow(mirror::Object* ref, mirror::Objec
uint32_t offset)
SHARED_REQUIRES(Locks::mutator_lock_) HOT_ATTR;
+// Read barrier entrypoint for GC roots.
+extern "C" mirror::Object* artReadBarrierForRootSlow(GcRoot<mirror::Object>* root)
+ SHARED_REQUIRES(Locks::mutator_lock_) HOT_ATTR;
+
} // namespace art
#endif // ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_H_
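
The two slow paths take different inputs because they read different kinds of slots. Roughly, based on the declarations above (ReadFieldSlow/ReadRootSlow are hypothetical helper names used only for this illustration):

    // Heap reference inside an object: the holder object and field offset identify the slot.
    mirror::Object* ReadFieldSlow(mirror::Object* holder, uint32_t offset) {
      return artReadBarrierSlow(/*ref*/ nullptr, holder, offset);
    }

    // GC root (e.g. a class root held by the runtime): only the root pointer is needed.
    mirror::Object* ReadRootSlow(GcRoot<mirror::Object>* root) {
      return artReadBarrierForRootSlow(root);
    }
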
diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h
index 73d8ae76ae..ee7b986094 100644
--- a/runtime/entrypoints/quick/quick_entrypoints_list.h
+++ b/runtime/entrypoints/quick/quick_entrypoints_list.h
@@ -146,7 +146,8 @@
V(NewStringFromStringBuilder, void) \
\
V(ReadBarrierJni, void, mirror::CompressedReference<mirror::Object>*, Thread*) \
- V(ReadBarrierSlow, mirror::Object*, mirror::Object*, mirror::Object*, uint32_t)
+ V(ReadBarrierSlow, mirror::Object*, mirror::Object*, mirror::Object*, uint32_t) \
+ V(ReadBarrierForRootSlow, mirror::Object*, GcRoot<mirror::Object>*)
#endif // ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_LIST_H_
#undef ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_LIST_H_ // #define is only for lint.
diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc
index 7361d34b6a..7ec5fc50e1 100644
--- a/runtime/entrypoints/quick/quick_field_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc
@@ -14,14 +14,16 @@
* limitations under the License.
*/
+#include <stdint.h>
+
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "callee_save_frame.h"
#include "dex_file-inl.h"
#include "entrypoints/entrypoint_utils-inl.h"
+#include "gc_root-inl.h"
#include "mirror/class-inl.h"
-
-#include <stdint.h>
+#include "mirror/object_reference.h"
namespace art {
@@ -560,13 +562,25 @@ extern "C" int artSetObjInstanceFromCode(uint32_t field_idx, mirror::Object* obj
// TODO: Currently the read barrier does not have a fast path. Ideally the slow path should only
// take one parameter "ref", which is given by the fast path.
extern "C" mirror::Object* artReadBarrierSlow(mirror::Object* ref ATTRIBUTE_UNUSED,
- mirror::Object* obj, uint32_t offset) {
- DCHECK(kUseReadBarrier);
+ mirror::Object* obj,
+ uint32_t offset) {
+ DCHECK(kEmitCompilerReadBarrier);
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(obj) + offset;
mirror::HeapReference<mirror::Object>* ref_addr =
- reinterpret_cast<mirror::HeapReference<mirror::Object>*>(raw_addr);
- return ReadBarrier::Barrier<mirror::Object, kWithReadBarrier, true>(obj, MemberOffset(offset),
- ref_addr);
+ reinterpret_cast<mirror::HeapReference<mirror::Object>*>(raw_addr);
+ constexpr ReadBarrierOption kReadBarrierOption =
+ kUseReadBarrier ? kWithReadBarrier : kWithoutReadBarrier;
+ mirror::Object* result =
+ ReadBarrier::Barrier<mirror::Object, kReadBarrierOption, true>(obj,
+ MemberOffset(offset),
+ ref_addr);
+ return result;
+}
+
+extern "C" mirror::Object* artReadBarrierForRootSlow(GcRoot<mirror::Object>* root) {
+ DCHECK(kEmitCompilerReadBarrier);
+ // TODO: Pass a GcRootSource object as second argument to GcRoot::Read?
+ return root->Read();
}
} // namespace art
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 5eda6d6bd3..abf9ac49e6 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -645,8 +645,8 @@ extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self,
// frame.
ScopedQuickEntrypointChecks sqec(self);
- if (method->IsAbstract()) {
- ThrowAbstractMethodError(method);
+ if (UNLIKELY(!method->IsInvokable())) {
+ method->ThrowInvocationTimeError();
return 0;
}
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index 78f56eef8d..8587edee54 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -302,8 +302,10 @@ class EntrypointsOrderTest : public CommonRuntimeTest {
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pNewStringFromStringBuilder, pReadBarrierJni,
sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pReadBarrierJni, pReadBarrierSlow, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pReadBarrierSlow, pReadBarrierForRootSlow,
+ sizeof(void*));
- CHECKED(OFFSETOF_MEMBER(QuickEntryPoints, pReadBarrierSlow)
+ CHECKED(OFFSETOF_MEMBER(QuickEntryPoints, pReadBarrierForRootSlow)
+ sizeof(void*) == sizeof(QuickEntryPoints), QuickEntryPoints_all);
}
};
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 1361f7bcc5..8f7bb946c3 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -487,7 +487,7 @@ void ModUnionTableCardCache::ClearCards() {
// Mark all references to the alloc space(s).
void ModUnionTableCardCache::UpdateAndMarkReferences(MarkObjectVisitor* visitor) {
- auto* image_space = heap_->GetImageSpace();
+ auto* image_space = heap_->GetBootImageSpace();
// If we don't have an image space, just pass in space_ as the immune space. Pass in the same
// space_ instead of image_space to avoid a null check in ModUnionUpdateObjectReferencesVisitor.
CardBitVisitor bit_visitor(visitor, space_, image_space != nullptr ? image_space : space_,
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 4a49712cbc..f4cf3ae260 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -23,7 +23,7 @@
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/reference_processor.h"
#include "gc/space/image_space.h"
-#include "gc/space/space.h"
+#include "gc/space/space-inl.h"
#include "intern_table.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
@@ -358,13 +358,17 @@ void ConcurrentCopying::MarkingPhase() {
// scan the image objects from roots by relying on the card table,
// but it's necessary for the RB to-space invariant to hold.
TimingLogger::ScopedTiming split1("VisitImageRoots", GetTimings());
- gc::space::ImageSpace* image = heap_->GetImageSpace();
- if (image != nullptr) {
- mirror::ObjectArray<mirror::Object>* image_root = image->GetImageHeader().GetImageRoots();
- mirror::Object* marked_image_root = Mark(image_root);
- CHECK_EQ(image_root, marked_image_root) << "An image object does not move";
- if (ReadBarrier::kEnableToSpaceInvariantChecks) {
- AssertToSpaceInvariant(nullptr, MemberOffset(0), marked_image_root);
+ for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
+ if (space->IsImageSpace()) {
+ gc::space::ImageSpace* image = space->AsImageSpace();
+ if (image != nullptr) {
+ mirror::ObjectArray<mirror::Object>* image_root = image->GetImageHeader().GetImageRoots();
+ mirror::Object* marked_image_root = Mark(image_root);
+ CHECK_EQ(image_root, marked_image_root) << "An image object does not move";
+ if (ReadBarrier::kEnableToSpaceInvariantChecks) {
+ AssertToSpaceInvariant(nullptr, MemberOffset(0), marked_image_root);
+ }
+ }
}
}
}
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index fd2426b716..da9a79e1a2 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -233,7 +233,8 @@ Heap::Heap(size_t initial_size,
backtrace_lock_(nullptr),
seen_backtrace_count_(0u),
unique_backtrace_count_(0u),
- gc_disabled_for_shutdown_(false) {
+ gc_disabled_for_shutdown_(false),
+ boot_image_space_(nullptr) {
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "Heap() entering";
}
@@ -262,15 +263,16 @@ Heap::Heap(size_t initial_size,
if (!image_file_name.empty()) {
ATRACE_BEGIN("ImageSpace::Create");
std::string error_msg;
- auto* image_space = space::ImageSpace::Create(image_file_name.c_str(), image_instruction_set,
+ boot_image_space_ = space::ImageSpace::Create(image_file_name.c_str(),
+ image_instruction_set,
&error_msg);
ATRACE_END();
- if (image_space != nullptr) {
- AddSpace(image_space);
+ if (boot_image_space_ != nullptr) {
+ AddSpace(boot_image_space_);
// Oat files referenced by image files immediately follow them in memory, ensure alloc space
// isn't going to get in the middle
- uint8_t* oat_file_end_addr = image_space->GetImageHeader().GetOatFileEnd();
- CHECK_GT(oat_file_end_addr, image_space->End());
+ uint8_t* oat_file_end_addr = boot_image_space_->GetImageHeader().GetOatFileEnd();
+ CHECK_GT(oat_file_end_addr, boot_image_space_->End());
requested_alloc_space_begin = AlignUp(oat_file_end_addr, kPageSize);
} else {
LOG(ERROR) << "Could not create image space with image file '" << image_file_name << "'. "
@@ -454,11 +456,11 @@ Heap::Heap(size_t initial_size,
rb_table_.reset(new accounting::ReadBarrierTable());
DCHECK(rb_table_->IsAllCleared());
}
- if (GetImageSpace() != nullptr) {
+ if (GetBootImageSpace() != nullptr) {
// Don't add the image mod union table if we are running without an image, this can crash if
// we use the CardCache implementation.
accounting::ModUnionTable* mod_union_table = new accounting::ModUnionTableToZygoteAllocspace(
- "Image mod-union table", this, GetImageSpace());
+ "Image mod-union table", this, GetBootImageSpace());
CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
AddModUnionTable(mod_union_table);
}
@@ -523,12 +525,12 @@ Heap::Heap(size_t initial_size,
garbage_collectors_.push_back(mark_compact_collector_);
}
}
- if (GetImageSpace() != nullptr && non_moving_space_ != nullptr &&
+ if (GetBootImageSpace() != nullptr && non_moving_space_ != nullptr &&
(is_zygote || separate_non_moving_space || foreground_collector_type_ == kCollectorTypeGSS)) {
// Check that there's no gap between the image space and the non moving space so that the
// immune region won't break (eg. due to a large object allocated in the gap). This is only
// required when we're the zygote or using GSS.
- bool no_gap = MemMap::CheckNoGaps(GetImageSpace()->GetMemMap(),
+ bool no_gap = MemMap::CheckNoGaps(GetBootImageSpace()->GetMemMap(),
non_moving_space_->GetMemMap());
if (!no_gap) {
PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
@@ -748,15 +750,6 @@ bool Heap::IsCompilingBoot() const {
return true;
}
-bool Heap::HasImageSpace() const {
- for (const auto& space : continuous_spaces_) {
- if (space->IsImageSpace()) {
- return true;
- }
- }
- return false;
-}
-
void Heap::IncrementDisableMovingGC(Thread* self) {
// Need to do this holding the lock to prevent races where the GC is about to run / running when
// we attempt to disable it.
@@ -1209,13 +1202,8 @@ space::Space* Heap::FindSpaceFromObject(const mirror::Object* obj, bool fail_ok)
return FindDiscontinuousSpaceFromObject(obj, fail_ok);
}
-space::ImageSpace* Heap::GetImageSpace() const {
- for (const auto& space : continuous_spaces_) {
- if (space->IsImageSpace()) {
- return space->AsImageSpace();
- }
- }
- return nullptr;
+space::ImageSpace* Heap::GetBootImageSpace() const {
+ return boot_image_space_;
}
void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) {
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index cc48172f71..e23b1a3166 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -580,9 +580,9 @@ class Heap {
// Unbind any bound bitmaps.
void UnBindBitmaps() REQUIRES(Locks::heap_bitmap_lock_);
- // DEPRECATED: Should remove in "near" future when support for multiple image spaces is added.
- // Assumes there is only one image space.
- space::ImageSpace* GetImageSpace() const;
+ // Returns the boot image space. There may be multiple image spaces, but there is only one boot
+ // image space.
+ space::ImageSpace* GetBootImageSpace() const;
  // Permanently disable moving garbage collection.
void DisableMovingGc() REQUIRES(!*gc_complete_lock_);
@@ -660,7 +660,9 @@ class Heap {
void RemoveRememberedSet(space::Space* space);
bool IsCompilingBoot() const;
- bool HasImageSpace() const;
+ bool HasImageSpace() const {
+ return boot_image_space_ != nullptr;
+ }
ReferenceProcessor* GetReferenceProcessor() {
return reference_processor_.get();
@@ -1320,6 +1322,9 @@ class Heap {
// allocating.
bool gc_disabled_for_shutdown_ GUARDED_BY(gc_complete_lock_);
+ // Boot image space.
+ space::ImageSpace* boot_image_space_;
+
friend class CollectorTransitionTask;
friend class collector::GarbageCollector;
friend class collector::MarkCompact;
diff --git a/runtime/gc_root.h b/runtime/gc_root.h
index 477e67b3c2..3734bcc7e1 100644
--- a/runtime/gc_root.h
+++ b/runtime/gc_root.h
@@ -198,7 +198,7 @@ class GcRoot {
ALWAYS_INLINE GcRoot(MirrorType* ref = nullptr) SHARED_REQUIRES(Locks::mutator_lock_);
private:
- // Root visitors take pointers to root_ and place the min CompressedReference** arrays. We use a
+ // Root visitors take pointers to root_ and place them in CompressedReference** arrays. We use a
// CompressedReference<mirror::Object> here since it violates strict aliasing requirements to
// cast CompressedReference<MirrorType>* to CompressedReference<mirror::Object>*.
mutable mirror::CompressedReference<mirror::Object> root_;
diff --git a/runtime/globals.h b/runtime/globals.h
index 987a94ea4b..e7ea6f3788 100644
--- a/runtime/globals.h
+++ b/runtime/globals.h
@@ -87,8 +87,18 @@ static constexpr bool kUseTableLookupReadBarrier = false;
#endif
static constexpr bool kUseBakerOrBrooksReadBarrier = kUseBakerReadBarrier || kUseBrooksReadBarrier;
-static constexpr bool kUseReadBarrier = kUseBakerReadBarrier || kUseBrooksReadBarrier ||
- kUseTableLookupReadBarrier;
+static constexpr bool kUseReadBarrier =
+ kUseBakerReadBarrier || kUseBrooksReadBarrier || kUseTableLookupReadBarrier;
+
+// Debugging flag that forces the generation of read barriers, but
+// does not trigger the use of the concurrent copying GC.
+//
+// TODO: Remove this flag when the read barriers compiler
+// instrumentation is completed.
+static constexpr bool kForceReadBarrier = false;
+// TODO: Likewise, remove this flag when kForceReadBarrier is removed
+// and replace it with kUseReadBarrier.
+static constexpr bool kEmitCompilerReadBarrier = kForceReadBarrier || kUseReadBarrier;
// If true, references within the heap are poisoned (negated).
#ifdef USE_HEAP_POISONING
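
The split between the two constants is what makes the debugging flag useful: kEmitCompilerReadBarrier gates whether read-barrier code is emitted and reachable at all, while kUseReadBarrier gates whether the barrier actually does anything. The artReadBarrierSlow change earlier in this patch shows the resulting pattern, which boils down to:

    DCHECK(kEmitCompilerReadBarrier);  // Only reachable when the compiler emits read barriers.
    constexpr ReadBarrierOption kReadBarrierOption =
        kUseReadBarrier ? kWithReadBarrier : kWithoutReadBarrier;  // Only act when the GC needs it.
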
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 0bc9dd8375..bc2c197d33 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -108,7 +108,7 @@ static void UpdateEntrypoints(ArtMethod* method, const void* quick_code)
}
void Instrumentation::InstallStubsForMethod(ArtMethod* method) {
- if (method->IsAbstract() || method->IsProxyMethod()) {
+ if (!method->IsInvokable() || method->IsProxyMethod()) {
// Do not change stubs for these methods.
return;
}
@@ -734,7 +734,7 @@ bool Instrumentation::IsDeoptimizedMethodsEmpty() const {
void Instrumentation::Deoptimize(ArtMethod* method) {
CHECK(!method->IsNative());
CHECK(!method->IsProxyMethod());
- CHECK(!method->IsAbstract());
+ CHECK(method->IsInvokable());
Thread* self = Thread::Current();
{
@@ -757,7 +757,7 @@ void Instrumentation::Deoptimize(ArtMethod* method) {
void Instrumentation::Undeoptimize(ArtMethod* method) {
CHECK(!method->IsNative());
CHECK(!method->IsProxyMethod());
- CHECK(!method->IsAbstract());
+ CHECK(method->IsInvokable());
Thread* self = Thread::Current();
bool empty;
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index f4658d5342..e2e4782d00 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -187,7 +187,7 @@ mirror::String* InternTable::LookupStringFromImage(mirror::String* s) {
if (image_added_to_intern_table_) {
return nullptr;
}
- gc::space::ImageSpace* image = Runtime::Current()->GetHeap()->GetImageSpace();
+ gc::space::ImageSpace* image = Runtime::Current()->GetHeap()->GetBootImageSpace();
if (image == nullptr) {
return nullptr; // No image present.
}
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 7c0594a8bb..d686f749f3 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -262,7 +262,7 @@ static JValue Execute(Thread* self, const DexFile::CodeItem* code_item, ShadowFr
static inline JValue Execute(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame, JValue result_register) {
- DCHECK(!shadow_frame.GetMethod()->IsAbstract());
+ DCHECK(shadow_frame.GetMethod()->IsInvokable());
DCHECK(!shadow_frame.GetMethod()->IsNative());
shadow_frame.GetMethod()->GetDeclaringClass()->AssertInitializedOrInitializingInThread(self);
@@ -318,9 +318,9 @@ void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method, Object* receive
if (code_item != nullptr) {
num_regs = code_item->registers_size_;
num_ins = code_item->ins_size_;
- } else if (method->IsAbstract()) {
+ } else if (!method->IsInvokable()) {
self->EndAssertNoThreadSuspension(old_cause);
- ThrowAbstractMethodError(method);
+ method->ThrowInvocationTimeError();
return;
} else {
DCHECK(method->IsNative());
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index c8650c4238..9f6699f730 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -141,8 +141,9 @@ static inline bool IsValidLambdaTargetOrThrow(ArtMethod* called_method)
if (UNLIKELY(called_method == nullptr)) {
// The shadow frame should already be pushed, so we don't need to update it.
- } else if (UNLIKELY(called_method->IsAbstract())) {
- ThrowAbstractMethodError(called_method);
+ } else if (UNLIKELY(!called_method->IsInvokable())) {
+ called_method->ThrowInvocationTimeError();
+ // We got an error.
// TODO(iam): Also handle the case when the method is non-static, what error do we throw?
// TODO(iam): Also make sure that ACC_LAMBDA is set.
} else if (UNLIKELY(called_method->GetCodeItem() == nullptr)) {
@@ -617,8 +618,8 @@ static inline bool DoInvoke(Thread* self, ShadowFrame& shadow_frame, const Instr
CHECK(self->IsExceptionPending());
result->SetJ(0);
return false;
- } else if (UNLIKELY(called_method->IsAbstract())) {
- ThrowAbstractMethodError(called_method);
+ } else if (UNLIKELY(!called_method->IsInvokable())) {
+ called_method->ThrowInvocationTimeError();
result->SetJ(0);
return false;
} else {
@@ -656,8 +657,8 @@ static inline bool DoInvokeVirtualQuick(Thread* self, ShadowFrame& shadow_frame,
CHECK(self->IsExceptionPending());
result->SetJ(0);
return false;
- } else if (UNLIKELY(called_method->IsAbstract())) {
- ThrowAbstractMethodError(called_method);
+ } else if (UNLIKELY(!called_method->IsInvokable())) {
+ called_method->ThrowInvocationTimeError();
result->SetJ(0);
return false;
} else {
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index b5e28e9314..7cc05f7cd4 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -56,15 +56,17 @@ static bool IsBadJniVersion(int version) {
class SharedLibrary {
public:
SharedLibrary(JNIEnv* env, Thread* self, const std::string& path, void* handle,
- jobject class_loader)
+ jobject class_loader, void* class_loader_allocator)
: path_(path),
handle_(handle),
needs_native_bridge_(false),
class_loader_(env->NewWeakGlobalRef(class_loader)),
+ class_loader_allocator_(class_loader_allocator),
jni_on_load_lock_("JNI_OnLoad lock"),
jni_on_load_cond_("JNI_OnLoad condition variable", jni_on_load_lock_),
jni_on_load_thread_id_(self->GetThreadId()),
jni_on_load_result_(kPending) {
+ CHECK(class_loader_allocator_ != nullptr);
}
~SharedLibrary() {
@@ -78,6 +80,10 @@ class SharedLibrary {
return class_loader_;
}
+ const void* GetClassLoaderAllocator() const {
+ return class_loader_allocator_;
+ }
+
const std::string& GetPath() const {
return path_;
}
@@ -169,6 +175,9 @@ class SharedLibrary {
// The ClassLoader this library is associated with, a weak global JNI reference that is
// created/deleted with the scope of the library.
const jweak class_loader_;
+  // Used to do equality checks on class loaders so we can avoid decoding the weak root, which
+  // triggers read barriers that interfere with class unloading.
+ const void* class_loader_allocator_;
// Guards remaining items.
Mutex jni_on_load_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
@@ -224,11 +233,15 @@ class Libraries {
SHARED_REQUIRES(Locks::mutator_lock_) {
std::string jni_short_name(JniShortName(m));
std::string jni_long_name(JniLongName(m));
- const mirror::ClassLoader* declaring_class_loader = m->GetDeclaringClass()->GetClassLoader();
+ mirror::ClassLoader* const declaring_class_loader = m->GetDeclaringClass()->GetClassLoader();
ScopedObjectAccessUnchecked soa(Thread::Current());
+ void* const declaring_class_loader_allocator =
+ Runtime::Current()->GetClassLinker()->GetAllocatorForClassLoader(declaring_class_loader);
+ CHECK(declaring_class_loader_allocator != nullptr);
for (const auto& lib : libraries_) {
SharedLibrary* const library = lib.second;
- if (soa.Decode<mirror::ClassLoader*>(library->GetClassLoader()) != declaring_class_loader) {
+ // Use the allocator address for class loader equality to avoid unnecessary weak root decode.
+ if (library->GetClassLoaderAllocator() != declaring_class_loader_allocator) {
// We only search libraries loaded by the appropriate ClassLoader.
continue;
}
@@ -269,7 +282,7 @@ class Libraries {
// If class_loader is a null jobject then it is the boot class loader. We should not unload
// the native libraries of the boot class loader.
if (class_loader != nullptr &&
- soa.Decode<mirror::ClassLoader*>(class_loader) == nullptr) {
+ soa.Self()->IsJWeakCleared(class_loader)) {
void* const sym = library->FindSymbol("JNI_OnUnload", nullptr);
if (sym == nullptr) {
VLOG(jni) << "[No JNI_OnUnload found in \"" << library->GetPath() << "\"]";
@@ -667,6 +680,19 @@ mirror::Object* JavaVMExt::DecodeWeakGlobalDuringShutdown(Thread* self, Indirect
return weak_globals_.SynchronizedGet(ref);
}
+bool JavaVMExt::IsWeakGlobalCleared(Thread* self, IndirectRef ref) {
+ DCHECK_EQ(GetIndirectRefKind(ref), kWeakGlobal);
+ MutexLock mu(self, weak_globals_lock_);
+ while (UNLIKELY(!MayAccessWeakGlobals(self))) {
+ weak_globals_add_condition_.WaitHoldingLocks(self);
+ }
+  // When just checking whether a weak ref has been cleared, avoid triggering the read barrier in
+  // decode (DecodeWeakGlobal) so that we won't accidentally mark the object alive. Since the
+  // cleared sentinel is a non-moving object, we can compare the ref to it without the read barrier
+  // and decide if it's cleared.
+ return Runtime::Current()->IsClearedJniWeakGlobal(weak_globals_.Get<kWithoutReadBarrier>(ref));
+}
+
void JavaVMExt::UpdateWeakGlobal(Thread* self, IndirectRef ref, mirror::Object* result) {
MutexLock mu(self, weak_globals_lock_);
weak_globals_.Update(ref, result);
@@ -703,8 +729,19 @@ bool JavaVMExt::LoadNativeLibrary(JNIEnv* env, const std::string& path, jobject
MutexLock mu(self, *Locks::jni_libraries_lock_);
library = libraries_->Get(path);
}
+ void* class_loader_allocator = nullptr;
+ {
+ ScopedObjectAccess soa(env);
+ // As the incoming class loader is reachable/alive during the call of this function,
+ // it's okay to decode it without worrying about unexpectedly marking it alive.
+ mirror::ClassLoader* loader = soa.Decode<mirror::ClassLoader*>(class_loader);
+ class_loader_allocator =
+ Runtime::Current()->GetClassLinker()->GetAllocatorForClassLoader(loader);
+ CHECK(class_loader_allocator != nullptr);
+ }
if (library != nullptr) {
- if (env->IsSameObject(library->GetClassLoader(), class_loader) == JNI_FALSE) {
+ // Use the allocator pointers for class loader equality to avoid unnecessary weak root decode.
+ if (library->GetClassLoaderAllocator() != class_loader_allocator) {
// The library will be associated with class_loader. The JNI
// spec says we can't load the same library into more than one
// class loader.
@@ -765,7 +802,7 @@ bool JavaVMExt::LoadNativeLibrary(JNIEnv* env, const std::string& path, jobject
{
// Create SharedLibrary ahead of taking the libraries lock to maintain lock ordering.
std::unique_ptr<SharedLibrary> new_library(
- new SharedLibrary(env, self, path, handle, class_loader));
+ new SharedLibrary(env, self, path, handle, class_loader, class_loader_allocator));
MutexLock mu(self, *Locks::jni_libraries_lock_);
library = libraries_->Get(path);
if (library == nullptr) { // We won race to get libraries_lock.
diff --git a/runtime/java_vm_ext.h b/runtime/java_vm_ext.h
index c1fbdc0389..618f6faad5 100644
--- a/runtime/java_vm_ext.h
+++ b/runtime/java_vm_ext.h
@@ -149,6 +149,11 @@ class JavaVMExt : public JavaVM {
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!weak_globals_lock_);
+  // Checks if the weak global ref has been cleared by the GC without decoding it (decoding would
+  // trigger a read barrier).
+ bool IsWeakGlobalCleared(Thread* self, IndirectRef ref)
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!weak_globals_lock_);
+
Mutex& WeakGlobalsLock() RETURN_CAPABILITY(weak_globals_lock_) {
return weak_globals_lock_;
}
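
Both java_vm_ext changes follow the same idea: compare stable side data instead of decoding the weak root, so the GC never sees an unwanted read barrier. A condensed restatement of the two checks introduced above (library, declaring_class_loader_allocator, class_loader and soa come from the surrounding code; the real checks live inside their loops):

    // Class loader identity: compare the allocator pointers captured at library load time,
    // so no jweak decode (and therefore no read barrier) is needed.
    bool same_loader =
        (library->GetClassLoaderAllocator() == declaring_class_loader_allocator);

    // Unload check: ask whether the weak global was cleared instead of decoding it.
    bool loader_unloaded =
        (class_loader != nullptr && soa.Self()->IsJWeakCleared(class_loader));
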
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index df6936bf01..f1f4a03861 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -745,6 +745,15 @@ static JdwpError M_Bytecodes(JdwpState*, Request* request, ExpandBuf* reply)
return ERR_NONE;
}
+// Default implementation for IDEs relying on this command.
+static JdwpError M_IsObsolete(JdwpState*, Request* request, ExpandBuf* reply)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ request->ReadRefTypeId(); // unused reference type ID
+ request->ReadMethodId(); // unused method ID
+ expandBufAdd1(reply, false); // a method is never obsolete.
+ return ERR_NONE;
+}
+
/*
* Given an object reference, return the runtime type of the object
* (class or array).
@@ -1477,7 +1486,7 @@ static const JdwpHandlerMap gHandlers[] = {
{ 6, 1, M_LineTable, "Method.LineTable" },
{ 6, 2, M_VariableTable, "Method.VariableTable" },
{ 6, 3, M_Bytecodes, "Method.Bytecodes" },
- { 6, 4, nullptr, "Method.IsObsolete" },
+ { 6, 4, M_IsObsolete, "Method.IsObsolete" },
{ 6, 5, M_VariableTableWithGeneric, "Method.VariableTableWithGeneric" },
/* Field command set (8) */
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index b27a8849ed..7458424bac 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -122,7 +122,7 @@ class MANAGED PrimitiveArray : public Array {
T Get(int32_t i) ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_);
T GetWithoutChecks(int32_t i) ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
- DCHECK(CheckIsValidIndex(i));
+ DCHECK(CheckIsValidIndex(i)) << "i=" << i << " length=" << GetLength();
return GetData()[i];
}
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 91e1cecbdf..3590586228 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -726,12 +726,12 @@ ArtField* Class::FindField(Thread* self, Handle<Class> klass, const StringPiece&
void Class::SetPreverifiedFlagOnAllMethods(size_t pointer_size) {
DCHECK(IsVerified());
for (auto& m : GetDirectMethods(pointer_size)) {
- if (!m.IsNative() && !m.IsAbstract()) {
+ if (!m.IsNative() && m.IsInvokable()) {
m.SetPreverified();
}
}
for (auto& m : GetVirtualMethods(pointer_size)) {
- if (!m.IsNative() && !m.IsAbstract()) {
+ if (!m.IsNative() && m.IsInvokable()) {
m.SetPreverified();
}
}
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index c4339b9230..80e136c2cc 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -1185,7 +1185,7 @@ class MANAGED Class FINAL : public Object {
bool ProxyDescriptorEquals(const char* match) SHARED_REQUIRES(Locks::mutator_lock_);
- // Check that the pointer size mathces the one in the class linker.
+ // Check that the pointer size matches the one in the class linker.
ALWAYS_INLINE static void CheckPointerSize(size_t pointer_size);
static MemberOffset EmbeddedImTableOffset(size_t pointer_size);
diff --git a/runtime/mirror/string.cc b/runtime/mirror/string.cc
index 45610dccc8..be869d4e6a 100644
--- a/runtime/mirror/string.cc
+++ b/runtime/mirror/string.cc
@@ -254,7 +254,11 @@ CharArray* String::ToCharArray(Thread* self) {
StackHandleScope<1> hs(self);
Handle<String> string(hs.NewHandle(this));
CharArray* result = CharArray::Alloc(self, GetLength());
- memcpy(result->GetData(), string->GetValue(), string->GetLength() * sizeof(uint16_t));
+ if (result != nullptr) {
+ memcpy(result->GetData(), string->GetValue(), string->GetLength() * sizeof(uint16_t));
+ } else {
+ self->AssertPendingOOMException();
+ }
return result;
}
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index fbee2d7bf3..80ebd2cf0f 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -27,6 +27,7 @@ namespace art {
template<class T> class Handle;
struct StringOffsets;
class StringPiece;
+class StubTest_ReadBarrierForRoot_Test;
namespace mirror {
@@ -179,7 +180,7 @@ class MANAGED String FINAL : public Object {
static GcRoot<Class> java_lang_String_;
friend struct art::StringOffsets; // for verifying offset information
- ART_FRIEND_TEST(ObjectTest, StringLength); // for SetOffset and SetCount
+ ART_FRIEND_TEST(art::StubTest, ReadBarrierForRoot); // For java_lang_String_.
DISALLOW_IMPLICIT_CONSTRUCTORS(String);
};
diff --git a/runtime/modifiers.h b/runtime/modifiers.h
index 116cbe9254..9946eabc82 100644
--- a/runtime/modifiers.h
+++ b/runtime/modifiers.h
@@ -50,6 +50,10 @@ static constexpr uint32_t kAccPreverified = 0x00080000; // class (runt
static constexpr uint32_t kAccFastNative = 0x00080000; // method (dex only)
static constexpr uint32_t kAccMiranda = 0x00200000; // method (dex only)
static constexpr uint32_t kAccDefault = 0x00400000; // method (runtime)
+// This is set by the class linker during LinkInterfaceMethods. Prior to that point we do not know
+// if any particular method needs to be a default conflict. Used to figure out at runtime if
+// invoking this method will throw an exception.
+static constexpr uint32_t kAccDefaultConflict = 0x00800000; // method (runtime)
// Special runtime-only flags.
// Interface and all its super-interfaces with default methods have been recursively initialized.
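
The class linker hunk above DCHECKs IsDefaultConflicting() right after setting these bits, so the accessor is presumably just a flag test along the following lines (a hypothetical sketch; the actual definition lives in runtime/art_method.h, which is not shown in this excerpt):

    // Hypothetical shape of the accessor; kAccDefaultConflict is the flag defined above.
    bool ArtMethod::IsDefaultConflicting() {
      return (GetAccessFlags() & kAccDefaultConflict) != 0;
    }
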
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index da21fee3d2..19c71f6d97 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -485,10 +485,7 @@ void Monitor::Wait(Thread* self, int64_t ms, int32_t ns,
DCHECK(why == kTimedWaiting || why == kSleeping) << why;
self->GetWaitConditionVariable()->TimedWait(self, ms, ns);
}
- if (self->IsInterruptedLocked()) {
- was_interrupted = true;
- }
- self->SetInterruptedLocked(false);
+ was_interrupted = self->IsInterruptedLocked();
}
}
@@ -522,7 +519,7 @@ void Monitor::Wait(Thread* self, int64_t ms, int32_t ns,
monitor_lock_.Unlock(self);
- if (was_interrupted) {
+ if (was_interrupted && interruptShouldThrow) {
/*
* We were interrupted while waiting, or somebody interrupted an
* un-interruptible thread earlier and we're bailing out immediately.
@@ -534,9 +531,7 @@ void Monitor::Wait(Thread* self, int64_t ms, int32_t ns,
MutexLock mu(self, *self->GetWaitMutex());
self->SetInterruptedLocked(false);
}
- if (interruptShouldThrow) {
- self->ThrowNewException("Ljava/lang/InterruptedException;", nullptr);
- }
+ self->ThrowNewException("Ljava/lang/InterruptedException;", nullptr);
}
}
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 8b2f4d8d24..4cd3c3d730 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -149,8 +149,13 @@ class NullableScopedUtfChars {
void operator=(const NullableScopedUtfChars&);
};
-static jobject DexFile_openDexFileNative(
- JNIEnv* env, jclass, jstring javaSourceName, jstring javaOutputName, jint) {
+static jobject DexFile_openDexFileNative(JNIEnv* env,
+ jclass,
+ jstring javaSourceName,
+ jstring javaOutputName,
+ jint flags ATTRIBUTE_UNUSED,
+ // class_loader will be used for app images.
+ jobject class_loader ATTRIBUTE_UNUSED) {
ScopedUtfChars sourceName(env, javaSourceName);
if (sourceName.c_str() == nullptr) {
return 0;
@@ -159,7 +164,6 @@ static jobject DexFile_openDexFileNative(
if (env->ExceptionCheck()) {
return 0;
}
-
Runtime* const runtime = Runtime::Current();
ClassLinker* linker = runtime->GetClassLinker();
std::vector<std::unique_ptr<const DexFile>> dex_files;
@@ -441,7 +445,7 @@ static JNINativeMethod gMethods[] = {
NATIVE_METHOD(DexFile, getDexOptNeeded,
"(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Z)I"),
NATIVE_METHOD(DexFile, openDexFileNative,
- "(Ljava/lang/String;Ljava/lang/String;I)Ljava/lang/Object;"),
+ "(Ljava/lang/String;Ljava/lang/String;ILjava/lang/ClassLoader;)Ljava/lang/Object;"),
};
void register_dalvik_system_DexFile(JNIEnv* env) {
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index 1d067063a4..a27c9ce5bc 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -171,23 +171,17 @@ static void ZygoteHooks_nativePostForkChild(JNIEnv* env, jclass, jlong token, ji
proc_name = StringPrintf("%u", static_cast<uint32_t>(pid));
}
- std::string profiles_dir(GetDalvikCache("profiles", false /* create_if_absent */));
- if (!profiles_dir.empty()) {
- std::string trace_file = StringPrintf("%s/%s.trace.bin", profiles_dir.c_str(),
- proc_name.c_str());
- Trace::Start(trace_file.c_str(),
- -1,
- buffer_size,
- 0, // TODO: Expose flags.
- output_mode,
- trace_mode,
- 0); // TODO: Expose interval.
- if (thread->IsExceptionPending()) {
- ScopedObjectAccess soa(env);
- thread->ClearException();
- }
- } else {
- LOG(ERROR) << "Profiles dir is empty?!?!";
+ std::string trace_file = StringPrintf("/data/misc/trace/%s.trace.bin", proc_name.c_str());
+ Trace::Start(trace_file.c_str(),
+ -1,
+ buffer_size,
+ 0, // TODO: Expose flags.
+ output_mode,
+ trace_mode,
+ 0); // TODO: Expose interval.
+ if (thread->IsExceptionPending()) {
+ ScopedObjectAccess soa(env);
+ thread->ClearException();
}
}
}
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 99080f611c..0f3a013d53 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -846,7 +846,7 @@ std::string OatFileAssistant::OldProfileFileName() {
std::string OatFileAssistant::ImageLocation() {
Runtime* runtime = Runtime::Current();
- const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetImageSpace();
+ const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetBootImageSpace();
if (image_space == nullptr) {
return "";
}
@@ -949,21 +949,23 @@ const OatFileAssistant::ImageInfo* OatFileAssistant::GetImageInfo() {
image_info_load_attempted_ = true;
Runtime* runtime = Runtime::Current();
- const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetImageSpace();
+ const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetBootImageSpace();
if (image_space != nullptr) {
cached_image_info_.location = image_space->GetImageLocation();
if (isa_ == kRuntimeISA) {
const ImageHeader& image_header = image_space->GetImageHeader();
cached_image_info_.oat_checksum = image_header.GetOatChecksum();
- cached_image_info_.oat_data_begin = reinterpret_cast<uintptr_t>(image_header.GetOatDataBegin());
+ cached_image_info_.oat_data_begin = reinterpret_cast<uintptr_t>(
+ image_header.GetOatDataBegin());
cached_image_info_.patch_delta = image_header.GetPatchDelta();
} else {
std::unique_ptr<ImageHeader> image_header(
gc::space::ImageSpace::ReadImageHeaderOrDie(
cached_image_info_.location.c_str(), isa_));
cached_image_info_.oat_checksum = image_header->GetOatChecksum();
- cached_image_info_.oat_data_begin = reinterpret_cast<uintptr_t>(image_header->GetOatDataBegin());
+ cached_image_info_.oat_data_begin = reinterpret_cast<uintptr_t>(
+ image_header->GetOatDataBegin());
cached_image_info_.patch_delta = image_header->GetPatchDelta();
}
}
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index c54d7f8761..8c7efb2ea8 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -223,7 +223,7 @@ class OatFileAssistantTest : public CommonRuntimeTest {
false, dex_location.c_str(), &error_msg));
ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
- const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetImageSpace();
+ const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetBootImageSpace();
ASSERT_TRUE(image_space != nullptr);
const ImageHeader& image_header = image_space->GetImageHeader();
const OatHeader& oat_header = odex_file->GetOatHeader();
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index 9eee156bb0..ea6d3ff3dd 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -79,11 +79,8 @@ const OatFile* OatFileManager::FindOpenedOatFileFromOatLocationLocked(
}
const OatFile* OatFileManager::GetBootOatFile() const {
- gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetImageSpace();
- if (image_space == nullptr) {
- return nullptr;
- }
- return image_space->GetOatFile();
+ gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetBootImageSpace();
+ return (image_space == nullptr) ? nullptr : image_space->GetOatFile();
}
const OatFile* OatFileManager::GetPrimaryOatFile() const {
diff --git a/runtime/os.h b/runtime/os.h
index befe2e808a..46d89fb8a5 100644
--- a/runtime/os.h
+++ b/runtime/os.h
@@ -39,6 +39,10 @@ class OS {
// already exists, it is *not* overwritten, but unlinked, and a new inode will be used.
static File* CreateEmptyFile(const char* name);
+ // Create an empty file with write access. This is a *new* file, that is, if the file
+ // already exists, it is *not* overwritten, but unlinked, and a new inode will be used.
+ static File* CreateEmptyFileWriteOnly(const char* name);
+
// Open a file with the specified open(2) flags.
static File* OpenFileWithFlags(const char* name, int flags);
diff --git a/runtime/os_linux.cc b/runtime/os_linux.cc
index 675699daea..f45e9f6030 100644
--- a/runtime/os_linux.cc
+++ b/runtime/os_linux.cc
@@ -35,12 +35,20 @@ File* OS::OpenFileReadWrite(const char* name) {
return OpenFileWithFlags(name, O_RDWR);
}
-File* OS::CreateEmptyFile(const char* name) {
+static File* CreateEmptyFile(const char* name, int extra_flags) {
// In case the file exists, unlink it so we get a new file. This is necessary as the previous
// file may be in use and must not be changed.
unlink(name);
- return OpenFileWithFlags(name, O_RDWR | O_CREAT | O_TRUNC);
+ return OS::OpenFileWithFlags(name, O_CREAT | extra_flags);
+}
+
+File* OS::CreateEmptyFile(const char* name) {
+ return art::CreateEmptyFile(name, O_RDWR | O_TRUNC);
+}
+
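+// Write-only variant for files the runtime only ever writes (e.g. trace output);
+// O_NOFOLLOW refuses to follow a symlink at the target path and O_CLOEXEC keeps
+// the descriptor from leaking across exec.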
+File* OS::CreateEmptyFileWriteOnly(const char* name) {
+ return art::CreateEmptyFile(name, O_WRONLY | O_TRUNC | O_NOFOLLOW | O_CLOEXEC);
}
File* OS::OpenFileWithFlags(const char* name, int flags) {
diff --git a/runtime/reflection_test.cc b/runtime/reflection_test.cc
index c7c270946b..62347203a9 100644
--- a/runtime/reflection_test.cc
+++ b/runtime/reflection_test.cc
@@ -507,6 +507,8 @@ class ReflectionTest : public CommonCompilerTest {
TEST_F(ReflectionTest, StaticMainMethod) {
TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING_WITH_QUICK();
+ TEST_DISABLED_FOR_READ_BARRIER_WITH_QUICK();
+ TEST_DISABLED_FOR_READ_BARRIER_WITH_OPTIMIZING_FOR_UNSUPPORTED_INSTRUCTION_SETS();
ScopedObjectAccess soa(Thread::Current());
jobject jclass_loader = LoadDex("Main");
StackHandleScope<1> hs(soa.Self());
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 556ba56532..92a56a9b6e 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -528,13 +528,13 @@ bool Runtime::Start() {
// Use !IsAotCompiler so that we get test coverage, tests are never the zygote.
if (!IsAotCompiler()) {
ScopedObjectAccess soa(self);
- gc::space::ImageSpace* image_space = heap_->GetImageSpace();
+ gc::space::ImageSpace* image_space = heap_->GetBootImageSpace();
if (image_space != nullptr) {
ATRACE_BEGIN("AddImageStringsToTable");
GetInternTable()->AddImageStringsToTable(image_space);
ATRACE_END();
ATRACE_BEGIN("MoveImageClassesToClassTable");
- GetClassLinker()->MoveImageClassesToClassTable();
+ GetClassLinker()->AddBootImageClassesToClassTable();
ATRACE_END();
}
}
@@ -820,6 +820,7 @@ static size_t OpenDexFiles(const std::vector<std::string>& dex_filenames,
void Runtime::SetSentinel(mirror::Object* sentinel) {
CHECK(sentinel_.Read() == nullptr);
CHECK(sentinel != nullptr);
+ CHECK(!heap_->IsMovableObject(sentinel));
sentinel_ = GcRoot<mirror::Object>(sentinel);
}
@@ -930,7 +931,7 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized)
runtime_options.GetOrDefault(Opt::HSpaceCompactForOOMMinIntervalsMs));
ATRACE_END();
- if (heap_->GetImageSpace() == nullptr && !allow_dex_file_fallback_) {
+ if (heap_->GetBootImageSpace() == nullptr && !allow_dex_file_fallback_) {
LOG(ERROR) << "Dex file fallback disabled, cannot continue without image.";
ATRACE_END();
return false;
@@ -1043,7 +1044,7 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized)
class_linker_->InitFromImage();
ATRACE_END();
if (kIsDebugBuild) {
- GetHeap()->GetImageSpace()->VerifyImageAllocations();
+ GetHeap()->GetBootImageSpace()->VerifyImageAllocations();
}
if (boot_class_path_string_.empty()) {
// The bootclasspath is not explicitly specified: construct it from the loaded dex files.
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index 7b5bc1ad9a..3489834f30 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -80,10 +80,7 @@ RUNTIME_OPTIONS_KEY (std::string, PatchOat)
RUNTIME_OPTIONS_KEY (bool, Relocate, kDefaultMustRelocate)
RUNTIME_OPTIONS_KEY (bool, Dex2Oat, true)
RUNTIME_OPTIONS_KEY (bool, ImageDex2Oat, true)
- // kUseReadBarrier currently works with
- // the interpreter only.
- // TODO: make it work with the compiler.
-RUNTIME_OPTIONS_KEY (bool, Interpret, kUseReadBarrier) // -Xint
+RUNTIME_OPTIONS_KEY (bool, Interpret, false) // -Xint
// Disable the compiler for CC (for now).
RUNTIME_OPTIONS_KEY (XGcOption, GcOption) // -Xgc:
RUNTIME_OPTIONS_KEY (gc::space::LargeObjectSpaceType, \
@@ -97,7 +94,7 @@ RUNTIME_OPTIONS_KEY (LogVerbosity, Verbose)
RUNTIME_OPTIONS_KEY (unsigned int, LockProfThreshold)
RUNTIME_OPTIONS_KEY (std::string, StackTraceFile)
RUNTIME_OPTIONS_KEY (Unit, MethodTrace)
-RUNTIME_OPTIONS_KEY (std::string, MethodTraceFile, "/data/method-trace-file.bin")
+RUNTIME_OPTIONS_KEY (std::string, MethodTraceFile, "/data/misc/trace/method-trace-file.bin")
RUNTIME_OPTIONS_KEY (unsigned int, MethodTraceFileSize, 10 * MB)
RUNTIME_OPTIONS_KEY (Unit, MethodTraceStreaming)
RUNTIME_OPTIONS_KEY (TraceClockSource, ProfileClock, kDefaultTraceClockSource) // -Xprofile:
diff --git a/runtime/stack.cc b/runtime/stack.cc
index d7edfade15..88978bf240 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -865,8 +865,8 @@ static void AssertPcIsWithinQuickCode(ArtMethod* method, uintptr_t pc)
CHECK(code_start <= pc && pc <= (code_start + code_size))
<< PrettyMethod(method)
<< " pc=" << std::hex << pc
- << " code=" << code
- << " size=" << code_size;
+ << " code_start=" << code_start
+ << " code_size=" << code_size;
}
void StackVisitor::SanityCheckFrame() const {
diff --git a/runtime/thread.cc b/runtime/thread.cc
index b0cf418507..63e6326f2f 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1886,6 +1886,14 @@ mirror::Object* Thread::DecodeJObject(jobject obj) const {
return result;
}
+bool Thread::IsJWeakCleared(jweak obj) const {
+ CHECK(obj != nullptr);
+ IndirectRef ref = reinterpret_cast<IndirectRef>(obj);
+ IndirectRefKind kind = GetIndirectRefKind(ref);
+ CHECK_EQ(kind, kWeakGlobal);
+ return tlsPtr_.jni_env->vm->IsWeakGlobalCleared(const_cast<Thread*>(this), ref);
+}
+
// Implements java.lang.Thread.interrupted.
bool Thread::Interrupted() {
MutexLock mu(Thread::Current(), *wait_mutex_);
@@ -2509,6 +2517,7 @@ void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) {
QUICK_ENTRY_POINT_INFO(pNewStringFromStringBuilder)
QUICK_ENTRY_POINT_INFO(pReadBarrierJni)
QUICK_ENTRY_POINT_INFO(pReadBarrierSlow)
+ QUICK_ENTRY_POINT_INFO(pReadBarrierForRootSlow)
#undef QUICK_ENTRY_POINT_INFO
os << offset;
diff --git a/runtime/thread.h b/runtime/thread.h
index 138c143d34..4624f27564 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -445,6 +445,8 @@ class Thread {
// Convert a jobject into a Object*
mirror::Object* DecodeJObject(jobject obj) const SHARED_REQUIRES(Locks::mutator_lock_);
+ // Checks if the weak global ref has been cleared by the GC without decoding it.
+ bool IsJWeakCleared(jweak obj) const SHARED_REQUIRES(Locks::mutator_lock_);
mirror::Object* GetMonitorEnterObject() const SHARED_REQUIRES(Locks::mutator_lock_) {
return tlsPtr_.monitor_enter_object;
diff --git a/runtime/trace.cc b/runtime/trace.cc
index ab342aa882..5815f7a97c 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -331,7 +331,7 @@ void Trace::Start(const char* trace_filename, int trace_fd, size_t buffer_size,
std::unique_ptr<File> trace_file;
if (output_mode != TraceOutputMode::kDDMS) {
if (trace_fd < 0) {
- trace_file.reset(OS::CreateEmptyFile(trace_filename));
+ trace_file.reset(OS::CreateEmptyFileWriteOnly(trace_filename));
} else {
trace_file.reset(new File(trace_fd, "tracefile"));
trace_file->DisableAutoClose();
diff --git a/test/061-out-of-memory/expected.txt b/test/061-out-of-memory/expected.txt
index ca876299f5..c31980cad7 100644
--- a/test/061-out-of-memory/expected.txt
+++ b/test/061-out-of-memory/expected.txt
@@ -4,4 +4,5 @@ testOomeLarge beginning
testOomeLarge succeeded
testOomeSmall beginning
testOomeSmall succeeded
+Got expected toCharArray OOM
tests succeeded
diff --git a/test/061-out-of-memory/src/Main.java b/test/061-out-of-memory/src/Main.java
index c812c81114..bda978eb2c 100644
--- a/test/061-out-of-memory/src/Main.java
+++ b/test/061-out-of-memory/src/Main.java
@@ -26,6 +26,7 @@ public class Main {
testHugeArray();
testOomeLarge();
testOomeSmall();
+ testOomeToCharArray();
System.out.println("tests succeeded");
}
@@ -106,4 +107,21 @@ public class Main {
}
System.out.println("testOomeSmall succeeded");
}
+
+ private static void testOomeToCharArray() {
+ Object[] o = new Object[2000000];
+ String test = "test";
+ int i = 0;
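+    // First exhaust the heap by filling o with large char[] allocations until an OOME is thrown.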
+ try {
+ for (; i < o.length; ++i) o[i] = new char[1000000];
+ } catch (OutOfMemoryError oom) {}
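+    // With the heap nearly full, keep calling String.toCharArray() until it too
+    // throws the expected OutOfMemoryError.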
+ try {
+ for (; i < o.length; ++i) {
+ o[i] = test.toCharArray();
+ }
+ } catch (OutOfMemoryError oom) {
+ o = null;
+ System.out.println("Got expected toCharArray OOM");
+ }
+ }
}
diff --git a/test/117-nopatchoat/nopatchoat.cc b/test/117-nopatchoat/nopatchoat.cc
index 3e533ad62e..b6b1c43589 100644
--- a/test/117-nopatchoat/nopatchoat.cc
+++ b/test/117-nopatchoat/nopatchoat.cc
@@ -35,7 +35,7 @@ class NoPatchoatTest {
}
static bool isRelocationDeltaZero() {
- gc::space::ImageSpace* space = Runtime::Current()->GetHeap()->GetImageSpace();
+ gc::space::ImageSpace* space = Runtime::Current()->GetHeap()->GetBootImageSpace();
return space != nullptr && space->GetImageHeader().GetPatchDelta() == 0;
}
diff --git a/test/137-cfi/cfi.cc b/test/137-cfi/cfi.cc
index 78f884266a..7762b2d2e3 100644
--- a/test/137-cfi/cfi.cc
+++ b/test/137-cfi/cfi.cc
@@ -92,7 +92,7 @@ static bool CheckStack(Backtrace* bt, const std::vector<std::string>& seq) {
// detecting this.
#if __linux__
static bool IsPicImage() {
- gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetImageSpace();
+ gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetBootImageSpace();
CHECK(image_space != nullptr); // We should be running with an image.
const OatFile* oat_file = image_space->GetOatFile();
CHECK(oat_file != nullptr); // We should have an oat file to go with the image.
diff --git a/test/537-checker-jump-over-jump/src/Main.java b/test/537-checker-jump-over-jump/src/Main.java
index fb666eaaea..cf9a69d28e 100644
--- a/test/537-checker-jump-over-jump/src/Main.java
+++ b/test/537-checker-jump-over-jump/src/Main.java
@@ -20,13 +20,25 @@ public class Main {
public static int[] fibs;
/// CHECK-START-X86_64: int Main.test() disassembly (after)
- /// CHECK: If
- /// CHECK-NEXT: cmp
- /// CHECK-NEXT: jnl/ge
- /// CHECK-NOT: jmp
- /// CHECK: ArrayGet
- // Checks that there is no conditional jump over a jmp. The ArrayGet is in
- // the next block.
+ /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0
+ //
+ /// CHECK: If
+ /// CHECK-NEXT: cmp
+ /// CHECK-NEXT: jnl/ge
+ //
+ /// CHECK-DAG: <<Fibs:l\d+>> StaticFieldGet
+ /// CHECK-DAG: NullCheck [<<Fibs>>]
+ /// CHECK-NOT: jmp
+ /// CHECK-DAG: <<FibsAtZero:i\d+>> ArrayGet [<<Fibs>>,<<Zero>>]
+ /// CHECK-DAG: Return [<<FibsAtZero>>]
+ //
+ // Checks that there is no conditional jump over a `jmp`
+ // instruction. The `ArrayGet` instruction is in the next block.
+ //
+ // Note that the `StaticFieldGet` HIR instruction above (captured as
+ // `Fibs`) can produce a `jmp` x86-64 instruction when read barriers
+ // are enabled (to jump into the read barrier slow path), which is
+ // different from the `jmp` in the `CHECK-NOT` assertion.
public static int test() {
for (int i = 1; ; i++) {
if (i >= FIBCOUNT) {
diff --git a/test/964-default-iface-init-generated/util-src/generate_smali.py b/test/964-default-iface-init-generated/util-src/generate_smali.py
index 3c138abf26..c0ba157109 100755
--- a/test/964-default-iface-init-generated/util-src/generate_smali.py
+++ b/test/964-default-iface-init-generated/util-src/generate_smali.py
@@ -477,12 +477,12 @@ def create_test_classes():
ifaces = []
for sub in split:
ifaces.append(list(create_interface_trees(sub)))
- for supers in itertools.product(*ifaces):
- yield TestClass(clone_all(supers))
- for i in range(len(set(dump_tree(supers)) - set(supers))):
- ns = clone_all(supers)
- selected = sorted(set(dump_tree(ns)) - set(ns))[i]
- yield TestClass(tuple([selected] + list(ns)))
+ for supers in itertools.product(*ifaces):
+ yield TestClass(clone_all(supers))
+ for i in range(len(set(dump_tree(supers)) - set(supers))):
+ ns = clone_all(supers)
+ selected = sorted(set(dump_tree(ns)) - set(ns))[i]
+ yield TestClass(tuple([selected] + list(ns)))
def create_interface_trees(num):
"""
diff --git a/test/966-default-conflict/build b/test/966-default-conflict/build
new file mode 100755
index 0000000000..e66e8409c6
--- /dev/null
+++ b/test/966-default-conflict/build
@@ -0,0 +1,34 @@
+#!/bin/bash
+#
+# Copyright 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# make us exit on a failure
+set -e
+
+# TODO: Support running with jack.
+
+if [[ $@ == *"--jvm"* ]]; then
+ # Build the Java files if we are running a --jvm test
+ mkdir -p src
+ mkdir -p classes
+ ${ANDROID_BUILD_TOP}/art/tools/extract-embedded-java ./smali ./src
+ # Build with the non-conflicting version
+ ${JAVAC} -implicit:none -d classes src/Iface.java build-src/Iface2.java src/Main.java
+ rm classes/Iface2.class
+ # Build with the conflicting version
+ ${JAVAC} -implicit:none -cp classes -d classes src/Iface2.java
+else
+ ./default-build "$@" --experimental default-methods
+fi
diff --git a/test/966-default-conflict/build-src/Iface2.java b/test/966-default-conflict/build-src/Iface2.java
new file mode 100644
index 0000000000..8d97df8385
--- /dev/null
+++ b/test/966-default-conflict/build-src/Iface2.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// We extend Iface so that javac will not complain that Iface2 does not declare a sayHi method,
+// or that it would have a soft-conflict on the sayHi method if it did.
+public interface Iface2 extends Iface {
+ // public default String sayHi() {
+ // return "hello";
+ // }
+}
+
+
diff --git a/test/966-default-conflict/expected.txt b/test/966-default-conflict/expected.txt
new file mode 100644
index 0000000000..fad2c25979
--- /dev/null
+++ b/test/966-default-conflict/expected.txt
@@ -0,0 +1,18 @@
+Create Main instance
+Calling functions on concrete Main
+Calling non-conflicting function on Main
+CHARGE
+Calling conflicting function on Main
+Expected ICCE Thrown on Main
+Calling non-conflicting function on Main
+CHARGE
+Calling functions on interface Iface
+Calling non-conflicting function on Iface
+CHARGE
+Calling conflicting function on Iface
+Expected ICCE Thrown on Iface
+Calling non-conflicting function on Iface
+CHARGE
+Calling functions on interface Iface2
+Calling conflicting function on Iface2
+Expected ICCE Thrown on Iface2
diff --git a/test/966-default-conflict/info.txt b/test/966-default-conflict/info.txt
new file mode 100644
index 0000000000..2b67657dcc
--- /dev/null
+++ b/test/966-default-conflict/info.txt
@@ -0,0 +1,6 @@
+Smali-based tests for experimental interface default methods.
+
+Tests handling of default method conflicts.
+
+To run with --jvm you must export JAVA_HOME to a Java 8 Language installation
+and pass --use-java-home to run-test.
diff --git a/test/966-default-conflict/run b/test/966-default-conflict/run
new file mode 100755
index 0000000000..8944ea92d3
--- /dev/null
+++ b/test/966-default-conflict/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+${RUN} "$@" --experimental default-methods
diff --git a/test/966-default-conflict/smali/Iface.smali b/test/966-default-conflict/smali/Iface.smali
new file mode 100644
index 0000000000..e996b3a4f4
--- /dev/null
+++ b/test/966-default-conflict/smali/Iface.smali
@@ -0,0 +1,39 @@
+# /*
+# * Copyright (C) 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# public interface Iface {
+# public default String sayHi() {
+# return "Hi";
+# }
+# public default String charge() {
+# return "CHARGE";
+# }
+# }
+
+.class public abstract interface LIface;
+.super Ljava/lang/Object;
+
+.method public sayHi()Ljava/lang/String;
+ .locals 1
+ const-string v0, "Hi"
+ return-object v0
+.end method
+
+.method public charge()Ljava/lang/String;
+ .locals 1
+ const-string v0, "CHARGE"
+ return-object v0
+.end method
diff --git a/test/966-default-conflict/smali/Iface2.smali b/test/966-default-conflict/smali/Iface2.smali
new file mode 100644
index 0000000000..82fa547dea
--- /dev/null
+++ b/test/966-default-conflict/smali/Iface2.smali
@@ -0,0 +1,31 @@
+# /*
+# * Copyright (C) 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# public interface Iface2 {
+# public default String sayHi() {
+# return "hello";
+# }
+# }
+
+.class public abstract interface LIface2;
+.super Ljava/lang/Object;
+
+.method public sayHi()Ljava/lang/String;
+ .locals 1
+ const-string v0, "hello"
+ return-object v0
+.end method
+
diff --git a/test/966-default-conflict/smali/Main.smali b/test/966-default-conflict/smali/Main.smali
new file mode 100644
index 0000000000..ce974d8135
--- /dev/null
+++ b/test/966-default-conflict/smali/Main.smali
@@ -0,0 +1,227 @@
+# /*
+# * Copyright (C) 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# class Main implements Iface, Iface2 {
+# public static void main(String[] args) {
+# System.out.println("Create Main instance");
+# Main m = new Main();
+# System.out.println("Calling functions on concrete Main");
+# callMain(m);
+# System.out.println("Calling functions on interface Iface");
+# callIface(m);
+# System.out.println("Calling functions on interface Iface2");
+# callIface2(m);
+# }
+#
+# public static void callMain(Main m) {
+# System.out.println("Calling non-conflicting function on Main");
+# System.out.println(m.charge());
+# System.out.println("Calling conflicting function on Main");
+# try {
+# System.out.println(m.sayHi());
+# System.out.println("Unexpected no error Thrown on Main");
+# } catch (AbstractMethodError e) {
+# System.out.println("Unexpected AME Thrown on Main");
+# } catch (IncompatibleClassChangeError e) {
+# System.out.println("Expected ICCE Thrown on Main");
+# }
+# System.out.println("Calling non-conflicting function on Main");
+# System.out.println(m.charge());
+# return;
+# }
+#
+# public static void callIface(Iface m) {
+# System.out.println("Calling non-conflicting function on Iface");
+# System.out.println(m.charge());
+# System.out.println("Calling conflicting function on Iface");
+# try {
+# System.out.println(m.sayHi());
+# System.out.println("Unexpected no error Thrown on Iface");
+# } catch (AbstractMethodError e) {
+# System.out.println("Unexpected AME Thrown on Iface");
+# } catch (IncompatibleClassChangeError e) {
+# System.out.println("Expected ICCE Thrown on Iface");
+# }
+# System.out.println("Calling non-conflicting function on Iface");
+# System.out.println(m.charge());
+# return;
+# }
+#
+# public static void callIface2(Iface2 m) {
+# System.out.println("Calling conflicting function on Iface2");
+# try {
+# System.out.println(m.sayHi());
+# System.out.println("Unexpected no error Thrown on Iface2");
+# } catch (AbstractMethodError e) {
+# System.out.println("Unexpected AME Thrown on Iface2");
+# } catch (IncompatibleClassChangeError e) {
+# System.out.println("Expected ICCE Thrown on Iface2");
+# }
+# return;
+# }
+# }
+
+.class public LMain;
+.super Ljava/lang/Object;
+.implements LIface;
+.implements LIface2;
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+.method public static main([Ljava/lang/String;)V
+ .locals 3
+ sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
+
+ const-string v0, "Create Main instance"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ new-instance v2, LMain;
+ invoke-direct {v2}, LMain;-><init>()V
+
+ const-string v0, "Calling functions on concrete Main"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ invoke-static {v2}, LMain;->callMain(LMain;)V
+
+ const-string v0, "Calling functions on interface Iface"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ invoke-static {v2}, LMain;->callIface(LIface;)V
+
+ const-string v0, "Calling functions on interface Iface2"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ invoke-static {v2}, LMain;->callIface2(LIface2;)V
+
+ return-void
+.end method
+
+.method public static callIface(LIface;)V
+ .locals 2
+ sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ const-string v0, "Calling non-conflicting function on Iface"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ invoke-interface {p0}, LIface;->charge()Ljava/lang/String;
+ move-result-object v0
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ const-string v0, "Calling conflicting function on Iface"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ :try_start
+ invoke-interface {p0}, LIface;->sayHi()Ljava/lang/String;
+ move-result-object v0
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ const-string v0, "Unexpected no error Thrown on Iface"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ goto :error_end
+ :try_end
+ .catch Ljava/lang/AbstractMethodError; {:try_start .. :try_end} :AME_error_start
+ .catch Ljava/lang/IncompatibleClassChangeError; {:try_start .. :try_end} :ICCE_error_start
+ :AME_error_start
+ const-string v0, "Unexpected AME Thrown on Iface"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ goto :error_end
+ :ICCE_error_start
+ const-string v0, "Expected ICCE Thrown on Iface"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ goto :error_end
+ :error_end
+ const-string v0, "Calling non-conflicting function on Iface"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ invoke-interface {p0}, LIface;->charge()Ljava/lang/String;
+ move-result-object v0
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ return-void
+.end method
+
+.method public static callIface2(LIface2;)V
+ .locals 2
+ sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ const-string v0, "Calling conflicting function on Iface2"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ :try_start
+ invoke-interface {p0}, LIface2;->sayHi()Ljava/lang/String;
+ move-result-object v0
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ const-string v0, "Unexpected no error Thrown on Iface2"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ goto :error_end
+ :try_end
+ .catch Ljava/lang/AbstractMethodError; {:try_start .. :try_end} :AME_error_start
+ .catch Ljava/lang/IncompatibleClassChangeError; {:try_start .. :try_end} :ICCE_error_start
+ :AME_error_start
+ const-string v0, "Unexpected AME Thrown on Iface2"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ goto :error_end
+ :ICCE_error_start
+ const-string v0, "Expected ICCE Thrown on Iface2"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ goto :error_end
+ :error_end
+
+ return-void
+.end method
+
+.method public static callMain(LMain;)V
+ .locals 2
+ sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ const-string v0, "Calling non-conflicting function on Main"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ invoke-virtual {p0}, LMain;->charge()Ljava/lang/String;
+ move-result-object v0
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ const-string v0, "Calling conflicting function on Main"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ :try_start
+ invoke-virtual {p0}, LMain;->sayHi()Ljava/lang/String;
+ move-result-object v0
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ const-string v0, "Unexpected no error Thrown on Main"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ goto :error_end
+ :try_end
+ .catch Ljava/lang/AbstractMethodError; {:try_start .. :try_end} :AME_error_start
+ .catch Ljava/lang/IncompatibleClassChangeError; {:try_start .. :try_end} :ICCE_error_start
+ :AME_error_start
+ const-string v0, "Unexpected AME Thrown on Main"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ goto :error_end
+ :ICCE_error_start
+ const-string v0, "Expected ICCE Thrown on Main"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ goto :error_end
+ :error_end
+ const-string v0, "Calling non-conflicting function on Main"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ invoke-virtual {p0}, LMain;->charge()Ljava/lang/String;
+ move-result-object v0
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ return-void
+.end method
diff --git a/test/967-default-ame/build b/test/967-default-ame/build
new file mode 100755
index 0000000000..53001a9ad2
--- /dev/null
+++ b/test/967-default-ame/build
@@ -0,0 +1,35 @@
+#!/bin/bash
+#
+# Copyright 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# make us exit on a failure
+set -e
+
+# TODO: Support running with jack.
+
+if [[ $@ == *"--jvm"* ]]; then
+ # Build the Java files if we are running a --jvm test
+ mkdir -p src
+ mkdir -p classes
+ ${ANDROID_BUILD_TOP}/art/tools/extract-embedded-java ./smali ./src
+ # Build with the non-conflicting version
+ ${JAVAC} -implicit:none -d classes src/Iface.java build-src/Iface2.java build-src/Iface3.java src/Main.java
+ rm classes/Iface2.class
+ rm classes/Iface3.class
+ # Build with the conflicting version
+ ${JAVAC} -implicit:none -cp classes -d classes src/Iface2.java src/Iface3.java
+else
+ ./default-build "$@" --experimental default-methods
+fi
diff --git a/test/967-default-ame/build-src/Iface2.java b/test/967-default-ame/build-src/Iface2.java
new file mode 100644
index 0000000000..55b2ac01b0
--- /dev/null
+++ b/test/967-default-ame/build-src/Iface2.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public interface Iface2 extends Iface {
+ // public String sayHi();
+}
+
+
diff --git a/test/967-default-ame/build-src/Iface3.java b/test/967-default-ame/build-src/Iface3.java
new file mode 100644
index 0000000000..a6faa451e5
--- /dev/null
+++ b/test/967-default-ame/build-src/Iface3.java
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public interface Iface3 {
+ // public String charge();
+}
diff --git a/test/967-default-ame/expected.txt b/test/967-default-ame/expected.txt
new file mode 100644
index 0000000000..cbd4ad32eb
--- /dev/null
+++ b/test/967-default-ame/expected.txt
@@ -0,0 +1,18 @@
+Create Main instance
+Calling functions on concrete Main
+Calling non-abstract function on Main
+CHARGE
+Calling abstract function on Main
+Expected AME Thrown on Main
+Calling non-abstract function on Main
+CHARGE
+Calling functions on interface Iface
+Calling non-abstract function on Iface
+CHARGE
+Calling abstract function on Iface
+Expected AME Thrown on Iface
+Calling non-abstract function on Iface
+CHARGE
+Calling functions on interface Iface2
+Calling abstract function on Iface2
+Expected AME Thrown on Iface2
diff --git a/test/967-default-ame/info.txt b/test/967-default-ame/info.txt
new file mode 100644
index 0000000000..a346a322ec
--- /dev/null
+++ b/test/967-default-ame/info.txt
@@ -0,0 +1,6 @@
+Smali-based tests for experimental interface default methods.
+
+Tests handling of default method overrides.
+
+To run with --jvm you must export JAVA_HOME to a Java 8 Language installation
+and pass --use-java-home to run-test.
diff --git a/test/967-default-ame/run b/test/967-default-ame/run
new file mode 100755
index 0000000000..8944ea92d3
--- /dev/null
+++ b/test/967-default-ame/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+${RUN} "$@" --experimental default-methods
diff --git a/test/967-default-ame/smali/Iface.smali b/test/967-default-ame/smali/Iface.smali
new file mode 100644
index 0000000000..e996b3a4f4
--- /dev/null
+++ b/test/967-default-ame/smali/Iface.smali
@@ -0,0 +1,39 @@
+# /*
+# * Copyright (C) 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# public interface Iface {
+# public default String sayHi() {
+# return "Hi";
+# }
+# public default String charge() {
+# return "CHARGE";
+# }
+# }
+
+.class public abstract interface LIface;
+.super Ljava/lang/Object;
+
+.method public sayHi()Ljava/lang/String;
+ .locals 1
+ const-string v0, "Hi"
+ return-object v0
+.end method
+
+.method public charge()Ljava/lang/String;
+ .locals 1
+ const-string v0, "CHARGE"
+ return-object v0
+.end method
diff --git a/test/967-default-ame/smali/Iface2.smali b/test/967-default-ame/smali/Iface2.smali
new file mode 100644
index 0000000000..a21a8ddbc7
--- /dev/null
+++ b/test/967-default-ame/smali/Iface2.smali
@@ -0,0 +1,27 @@
+# /*
+# * Copyright (C) 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# public interface Iface2 extends Iface {
+# public String sayHi();
+# }
+
+.class public abstract interface LIface2;
+.super Ljava/lang/Object;
+.implements LIface;
+
+.method public abstract sayHi()Ljava/lang/String;
+.end method
+
diff --git a/test/967-default-ame/smali/Iface3.smali b/test/967-default-ame/smali/Iface3.smali
new file mode 100644
index 0000000000..874e96d069
--- /dev/null
+++ b/test/967-default-ame/smali/Iface3.smali
@@ -0,0 +1,26 @@
+# /*
+# * Copyright (C) 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# public interface Iface3 {
+# public String charge();
+# }
+
+.class public abstract interface LIface3;
+.super Ljava/lang/Object;
+
+.method public abstract charge()Ljava/lang/String;
+.end method
+
diff --git a/test/967-default-ame/smali/Main.smali b/test/967-default-ame/smali/Main.smali
new file mode 100644
index 0000000000..e4d63cfa24
--- /dev/null
+++ b/test/967-default-ame/smali/Main.smali
@@ -0,0 +1,228 @@
+# /*
+# * Copyright (C) 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# class Main implements Iface, Iface2, Iface3 {
+# public static void main(String[] args) {
+# System.out.println("Create Main instance");
+# Main m = new Main();
+# System.out.println("Calling functions on concrete Main");
+# callMain(m);
+# System.out.println("Calling functions on interface Iface");
+# callIface(m);
+# System.out.println("Calling functions on interface Iface2");
+# callIface2(m);
+# }
+#
+# public static void callMain(Main m) {
+# System.out.println("Calling non-abstract function on Main");
+# System.out.println(m.charge());
+# System.out.println("Calling abstract function on Main");
+# try {
+# System.out.println(m.sayHi());
+# System.out.println("Unexpected no error Thrown on Main");
+# } catch (AbstractMethodError e) {
+# System.out.println("Expected AME Thrown on Main");
+# } catch (IncompatibleClassChangeError e) {
+# System.out.println("Unexpected ICCE Thrown on Main");
+# }
+# System.out.println("Calling non-abstract function on Main");
+# System.out.println(m.charge());
+# return;
+# }
+#
+# public static void callIface(Iface m) {
+# System.out.println("Calling non-abstract function on Iface");
+# System.out.println(m.charge());
+# System.out.println("Calling abstract function on Iface");
+# try {
+# System.out.println(m.sayHi());
+# System.out.println("Unexpected no error Thrown on Iface");
+# } catch (AbstractMethodError e) {
+# System.out.println("Expected AME Thrown on Iface");
+# } catch (IncompatibleClassChangeError e) {
+# System.out.println("Unexpected ICCE Thrown on Iface");
+# }
+# System.out.println("Calling non-abstract function on Iface");
+# System.out.println(m.charge());
+# return;
+# }
+#
+# public static void callIface2(Iface2 m) {
+# System.out.println("Calling abstract function on Iface2");
+# try {
+# System.out.println(m.sayHi());
+# System.out.println("Unexpected no error Thrown on Iface2");
+# } catch (AbstractMethodError e) {
+# System.out.println("Expected AME Thrown on Iface2");
+# } catch (IncompatibleClassChangeError e) {
+# System.out.println("Unexpected ICCE Thrown on Iface2");
+# }
+# return;
+# }
+# }
+
+.class public LMain;
+.super Ljava/lang/Object;
+.implements LIface;
+.implements LIface2;
+.implements LIface3;
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+.method public static main([Ljava/lang/String;)V
+ .locals 3
+ sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
+
+ const-string v0, "Create Main instance"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ new-instance v2, LMain;
+ invoke-direct {v2}, LMain;-><init>()V
+
+ const-string v0, "Calling functions on concrete Main"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ invoke-static {v2}, LMain;->callMain(LMain;)V
+
+ const-string v0, "Calling functions on interface Iface"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ invoke-static {v2}, LMain;->callIface(LIface;)V
+
+ const-string v0, "Calling functions on interface Iface2"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ invoke-static {v2}, LMain;->callIface2(LIface2;)V
+
+ return-void
+.end method
+
+.method public static callIface(LIface;)V
+ .locals 2
+ sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ const-string v0, "Calling non-abstract function on Iface"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ invoke-interface {p0}, LIface;->charge()Ljava/lang/String;
+ move-result-object v0
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ const-string v0, "Calling abstract function on Iface"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ :try_start
+ invoke-interface {p0}, LIface;->sayHi()Ljava/lang/String;
+ move-result-object v0
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ const-string v0, "Unexpected no error Thrown on Iface"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ goto :error_end
+ :try_end
+ .catch Ljava/lang/AbstractMethodError; {:try_start .. :try_end} :AME_error_start
+ .catch Ljava/lang/IncompatibleClassChangeError; {:try_start .. :try_end} :ICCE_error_start
+ :AME_error_start
+ const-string v0, "Expected AME Thrown on Iface"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ goto :error_end
+ :ICCE_error_start
+ const-string v0, "Unexpected ICCE Thrown on Iface"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ goto :error_end
+ :error_end
+ const-string v0, "Calling non-abstract function on Iface"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ invoke-interface {p0}, LIface;->charge()Ljava/lang/String;
+ move-result-object v0
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ return-void
+.end method
+
+.method public static callIface2(LIface2;)V
+ .locals 2
+ sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ const-string v0, "Calling abstract function on Iface2"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ :try_start
+ invoke-interface {p0}, LIface2;->sayHi()Ljava/lang/String;
+ move-result-object v0
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ const-string v0, "Unexpected no error Thrown on Iface2"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ goto :error_end
+ :try_end
+ .catch Ljava/lang/AbstractMethodError; {:try_start .. :try_end} :AME_error_start
+ .catch Ljava/lang/IncompatibleClassChangeError; {:try_start .. :try_end} :ICCE_error_start
+ :AME_error_start
+ const-string v0, "Expected AME Thrown on Iface2"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ goto :error_end
+ :ICCE_error_start
+ const-string v0, "Unexpected ICCE Thrown on Iface2"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ goto :error_end
+ :error_end
+
+ return-void
+.end method
+
+.method public static callMain(LMain;)V
+ .locals 2
+ sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ const-string v0, "Calling non-abstract function on Main"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ invoke-virtual {p0}, LMain;->charge()Ljava/lang/String;
+ move-result-object v0
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ const-string v0, "Calling abstract function on Main"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ :try_start
+ invoke-virtual {p0}, LMain;->sayHi()Ljava/lang/String;
+ move-result-object v0
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ const-string v0, "Unexpected no error Thrown on Main"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ goto :error_end
+ :try_end
+ .catch Ljava/lang/AbstractMethodError; {:try_start .. :try_end} :AME_error_start
+ .catch Ljava/lang/IncompatibleClassChangeError; {:try_start .. :try_end} :ICCE_error_start
+ :AME_error_start
+ const-string v0, "Expected AME Thrown on Main"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ goto :error_end
+ :ICCE_error_start
+ const-string v0, "Unexpected ICCE Thrown on Main"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ goto :error_end
+ :error_end
+ const-string v0, "Calling non-abstract function on Main"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ invoke-virtual {p0}, LMain;->charge()Ljava/lang/String;
+ move-result-object v0
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ return-void
+.end method
diff --git a/test/968-default-partial-compile-generated/build b/test/968-default-partial-compile-generated/build
new file mode 100755
index 0000000000..1e9f8aadd5
--- /dev/null
+++ b/test/968-default-partial-compile-generated/build
@@ -0,0 +1,50 @@
+#!/bin/bash
+#
+# Copyright 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# make us exit on a failure
+set -e
+
+# We will be making more files than the ulimit is set to allow. Remove it temporarily.
+OLD_ULIMIT=`ulimit -S`
+ulimit -S unlimited
+
+restore_ulimit() {
+ ulimit -S "$OLD_ULIMIT"
+}
+trap 'restore_ulimit' ERR
+
+# TODO: Support running with jack.
+
+if [[ $@ == *"--jvm"* ]]; then
+ # Build the Java files if we are running a --jvm test
+ mkdir -p classes
+ mkdir -p src
+ echo "${JAVAC} \$@" >> ./javac_exec.sh
+  # This will use javac_exec.sh to execute the javac compiler. It will place the
+ # compiled class files in ./classes and the expected values in expected.txt
+ #
+ # After this the src directory will contain the final versions of all files.
+ ./util-src/generate_java.py ./javac_exec.sh ./src ./classes ./expected.txt ./build_log
+else
+ mkdir -p ./smali
+ # Generate the smali files and expected.txt or fail
+ ./util-src/generate_smali.py ./smali ./expected.txt
+ # Use the default build script
+ ./default-build "$@" "$EXTRA_ARGS" --experimental default-methods
+fi
+
+# Reset the ulimit back to its initial value
+restore_ulimit
diff --git a/test/968-default-partial-compile-generated/expected.txt b/test/968-default-partial-compile-generated/expected.txt
new file mode 100644
index 0000000000..1ddd65d177
--- /dev/null
+++ b/test/968-default-partial-compile-generated/expected.txt
@@ -0,0 +1 @@
+This file is generated by util-src/generate_smali.py do not directly modify!
diff --git a/test/968-default-partial-compile-generated/info.txt b/test/968-default-partial-compile-generated/info.txt
new file mode 100644
index 0000000000..bc1c42816e
--- /dev/null
+++ b/test/968-default-partial-compile-generated/info.txt
@@ -0,0 +1,17 @@
+Smali-based tests for experimental interface default methods.
+
+This tests that interface method resolution order is correct in the presence of
+partial compilation/illegal invokes.
+
+This obviously needs to run under ART or a Java 8 Language runtime and compiler.
+
+When run, smali test files are generated by the util-src/generate_smali.py
+script. If we run with --jvm, the util-src/generate_java.py script will
+generate equivalent java code based on the smali code.
+
+Care should be taken when updating the generate_smali.py script. It should
+always produce equivalent output when run multiple times, and the expected
+output should be valid.
+
+Do not modify the expected.txt file. It is generated on each run by
+util-src/generate_smali.py.
diff --git a/test/968-default-partial-compile-generated/run b/test/968-default-partial-compile-generated/run
new file mode 100755
index 0000000000..6d2930d463
--- /dev/null
+++ b/test/968-default-partial-compile-generated/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+${RUN} "$@" --experimental default-methods
diff --git a/test/968-default-partial-compile-generated/util-src/generate_java.py b/test/968-default-partial-compile-generated/util-src/generate_java.py
new file mode 100755
index 0000000000..35290efe1d
--- /dev/null
+++ b/test/968-default-partial-compile-generated/util-src/generate_java.py
@@ -0,0 +1,134 @@
+#!/usr/bin/python3
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Generate java test files for test 968.
+"""
+
+import generate_smali as base
+import os
+import sys
+from pathlib import Path
+
+BUILD_TOP = os.getenv("ANDROID_BUILD_TOP")
+if BUILD_TOP is None:
+ print("ANDROID_BUILD_TOP not set. Please run build/envsetup.sh", file=sys.stderr)
+ sys.exit(1)
+
+# Allow us to import mixins.
+sys.path.append(str(Path(BUILD_TOP)/"art"/"test"/"utils"/"python"))
+
+import testgen.mixins as mixins
+import functools
+import operator
+import subprocess
+
+class JavaConverter(mixins.DumpMixin, mixins.Named, mixins.JavaFileMixin):
+ """
+ A class that can convert a SmaliFile to a JavaFile.
+ """
+ def __init__(self, inner):
+ self.inner = inner
+
+ def get_name(self):
+ """Gets the name of this file."""
+ return self.inner.get_name()
+
+ def __str__(self):
+ out = ""
+ for line in str(self.inner).splitlines(keepends = True):
+ if line.startswith("#"):
+ out += line[1:]
+ return out
+
+class Compiler:
+ def __init__(self, sources, javac, temp_dir, classes_dir):
+ self.javac = javac
+ self.temp_dir = temp_dir
+ self.classes_dir = classes_dir
+ self.sources = sources
+
+ def compile_files(self, args, files):
+ """
+ Compile the files given with the arguments given.
+ """
+ args = args.split()
+ files = list(map(str, files))
+ cmd = ['sh', '-a', '-e', '--', str(self.javac)] + args + files
+ print("Running compile command: {}".format(cmd))
+ subprocess.check_call(cmd)
+ print("Compiled {} files".format(len(files)))
+
+ def execute(self):
+ """
+ Compiles this test, doing partial compilation as necessary.
+ """
+ # Compile Main and all classes first. Force all interfaces to be default so that there will be
+ # no compiler problems (works since classes only implement 1 interface).
+ for f in self.sources:
+ if isinstance(f, base.TestInterface):
+ JavaConverter(f.get_specific_version(base.InterfaceType.default)).dump(self.temp_dir)
+ else:
+ JavaConverter(f).dump(self.temp_dir)
+ self.compile_files("-d {}".format(self.classes_dir), self.temp_dir.glob("*.java"))
+
+ # Now we compile the interfaces
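+ # Interfaces are compiled most-derived first: each round picks the interfaces with no
+ # remaining (uncompiled) subtypes, compiles them against empty stubs of their
+ # superinterfaces, and repeats until the whole set has been compiled.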
+ ifaces = set(i for i in self.sources if isinstance(i, base.TestInterface))
+ while len(ifaces) != 0:
+ # Find those ifaces where there are no (uncompiled) interfaces that are subtypes.
+ tops = set(filter(lambda a: not any(map(lambda i: a in i.get_super_types(), ifaces)), ifaces))
+ files = []
+ # Dump these ones, they are getting compiled.
+ for f in tops:
+ out = JavaConverter(f)
+ out.dump(self.temp_dir)
+ files.append(self.temp_dir / out.get_file_name())
+ # Force all superinterfaces of these to be empty so there will be no conflicts
+ overrides = functools.reduce(operator.or_, map(lambda i: i.get_super_types(), tops), set())
+ for overridden in overrides:
+ out = JavaConverter(overridden.get_specific_version(base.InterfaceType.empty))
+ out.dump(self.temp_dir)
+ files.append(self.temp_dir / out.get_file_name())
+ self.compile_files("-d {outdir} -cp {outdir}".format(outdir = self.classes_dir), files)
+ # Remove these from the set of interfaces to be compiled.
+ ifaces -= tops
+ print("Finished compiling all files.")
+ return
+
+def main(argv):
+ javac_exec = Path(argv[1])
+ if not javac_exec.exists() or not javac_exec.is_file():
+ print("{} is not a shell script".format(javac_exec), file=sys.stderr)
+ sys.exit(1)
+ temp_dir = Path(argv[2])
+ if not temp_dir.exists() or not temp_dir.is_dir():
+ print("{} is not a valid source dir".format(temp_dir), file=sys.stderr)
+ sys.exit(1)
+ classes_dir = Path(argv[3])
+ if not classes_dir.exists() or not classes_dir.is_dir():
+ print("{} is not a valid classes directory".format(classes_dir), file=sys.stderr)
+ sys.exit(1)
+ expected_txt = Path(argv[4])
+ mainclass, all_files = base.create_all_test_files()
+
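+ # The expected output is computed from the full model of the generated classes,
+ # before any of the partial-compilation steps below are applied.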
+ with expected_txt.open('w') as out:
+ print(mainclass.get_expected(), file=out)
+ print("Wrote expected output")
+
+ Compiler(all_files, javac_exec, temp_dir, classes_dir).execute()
+
+if __name__ == '__main__':
+ main(sys.argv)
diff --git a/test/968-default-partial-compile-generated/util-src/generate_smali.py b/test/968-default-partial-compile-generated/util-src/generate_smali.py
new file mode 100755
index 0000000000..9855bcf854
--- /dev/null
+++ b/test/968-default-partial-compile-generated/util-src/generate_smali.py
@@ -0,0 +1,607 @@
+#!/usr/bin/python3
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Generate Smali test files for test 968.
+"""
+
+import os
+import sys
+from pathlib import Path
+
+BUILD_TOP = os.getenv("ANDROID_BUILD_TOP")
+if BUILD_TOP is None:
+ print("ANDROID_BUILD_TOP not set. Please run build/envsetup.sh", file=sys.stderr)
+ sys.exit(1)
+
+# Allow us to import utils and mixins.
+sys.path.append(str(Path(BUILD_TOP)/"art"/"test"/"utils"/"python"))
+
+from testgen.utils import get_copyright, subtree_sizes, gensym, filter_blanks
+import testgen.mixins as mixins
+
+from enum import Enum
+from functools import total_ordering
+import itertools
+import string
+
+# The max depth the type tree can have.
+MAX_IFACE_DEPTH = 3
+
+class MainClass(mixins.DumpMixin, mixins.Named, mixins.SmaliFileMixin):
+ """
+ A Main.smali file containing the Main class and the main function. It will run
+ all the test functions we have.
+ """
+
+ MAIN_CLASS_TEMPLATE = """{copyright}
+
+.class public LMain;
+.super Ljava/lang/Object;
+
+# class Main {{
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {{p0}}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+{test_funcs}
+
+{main_func}
+
+# }}
+"""
+
+ MAIN_FUNCTION_TEMPLATE = """
+# public static void main(String[] args) {{
+.method public static main([Ljava/lang/String;)V
+ .locals 0
+
+ {test_group_invoke}
+
+ return-void
+.end method
+# }}
+"""
+
+ TEST_GROUP_INVOKE_TEMPLATE = """
+# {test_name}();
+ invoke-static {{}}, {test_name}()V
+"""
+
+ def __init__(self):
+ """
+ Initialize this MainClass. We start out with no tests.
+ """
+ self.tests = set()
+
+ def get_expected(self):
+ """
+ Get the expected output of this test.
+ """
+ all_tests = sorted(self.tests)
+ return filter_blanks("\n".join(a.get_expected() for a in all_tests))
+
+ def add_test(self, ty):
+ """
+ Add a test for the concrete type 'ty'
+ """
+ self.tests.add(Func(ty))
+
+ def get_name(self):
+ """
+ Get the name of this class
+ """
+ return "Main"
+
+ def __str__(self):
+ """
+ Print the MainClass smali code.
+ """
+ all_tests = sorted(self.tests)
+ test_invoke = ""
+ test_funcs = ""
+ for t in all_tests:
+ test_funcs += str(t)
+ for t in all_tests:
+ test_invoke += self.TEST_GROUP_INVOKE_TEMPLATE.format(test_name=t.get_name())
+ main_func = self.MAIN_FUNCTION_TEMPLATE.format(test_group_invoke=test_invoke)
+
+ return self.MAIN_CLASS_TEMPLATE.format(copyright = get_copyright("smali"),
+ test_funcs = test_funcs,
+ main_func = main_func)
+
+class Func(mixins.Named, mixins.NameComparableMixin):
+ """
+ A function that tests the functionality of a concrete type. Should only be
+ constructed by MainClass.add_test.
+ """
+
+ TEST_FUNCTION_TEMPLATE = """
+# public static void {fname}() {{
+# {farg} v = null;
+# try {{
+# v = new {farg}();
+# }} catch (Throwable e) {{
+# System.out.println("Unexpected error occurred which creating {farg} instance");
+# e.printStackTrace(System.out);
+# return;
+# }}
+# try {{
+# System.out.printf("{tree} calls %s\\n", v.getName());
+# return;
+# }} catch (AbstractMethodError e) {{
+# System.out.println("{tree} threw AbstractMethodError");
+# }} catch (NoSuchMethodError e) {{
+# System.out.println("{tree} threw NoSuchMethodError");
+# }} catch (IncompatibleClassChangeError e) {{
+# System.out.println("{tree} threw IncompatibleClassChangeError");
+# }} catch (Throwable e) {{
+# e.printStackTrace(System.out);
+# return;
+# }}
+# }}
+.method public static {fname}()V
+ .locals 7
+ sget-object v4, Ljava/lang/System;->out:Ljava/io/PrintStream;
+
+ :new_{fname}_try_start
+ new-instance v0, L{farg};
+ invoke-direct {{v0}}, L{farg};-><init>()V
+ goto :call_{fname}_try_start
+ :new_{fname}_try_end
+ .catch Ljava/lang/Throwable; {{:new_{fname}_try_start .. :new_{fname}_try_end}} :new_error_{fname}_start
+ :new_error_{fname}_start
+ move-exception v6
+ const-string v5, "Unexpected error occurred while creating {farg} instance"
+ invoke-virtual {{v4,v5}}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ invoke-virtual {{v6,v4}}, Ljava/lang/Throwable;->printStackTrace(Ljava/io/PrintStream;)V
+ return-void
+ :call_{fname}_try_start
+ const/4 v1, 1
+ new-array v2,v1, [Ljava/lang/Object;
+ const/4 v1, 0
+ invoke-virtual {{v0}}, L{farg};->getName()Ljava/lang/String;
+ move-result-object v3
+ aput-object v3,v2,v1
+
+ const-string v5, "{tree} calls %s\\n"
+
+ invoke-virtual {{v4,v5,v2}}, Ljava/io/PrintStream;->printf(Ljava/lang/String;[Ljava/lang/Object;)Ljava/io/PrintStream;
+ return-void
+ :call_{fname}_try_end
+ .catch Ljava/lang/AbstractMethodError; {{:call_{fname}_try_start .. :call_{fname}_try_end}} :AME_{fname}_start
+ .catch Ljava/lang/NoSuchMethodError; {{:call_{fname}_try_start .. :call_{fname}_try_end}} :NSME_{fname}_start
+ .catch Ljava/lang/IncompatibleClassChangeError; {{:call_{fname}_try_start .. :call_{fname}_try_end}} :ICCE_{fname}_start
+ .catch Ljava/lang/Throwable; {{:call_{fname}_try_start .. :call_{fname}_try_end}} :error_{fname}_start
+ :AME_{fname}_start
+ const-string v5, "{tree} threw AbstractMethodError"
+ invoke-virtual {{v4,v5}}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ return-void
+ :NSME_{fname}_start
+ const-string v5, "{tree} threw NoSuchMethodError"
+ invoke-virtual {{v4,v5}}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ return-void
+ :ICCE_{fname}_start
+ const-string v5, "{tree} threw IncompatibleClassChangeError"
+ invoke-virtual {{v4,v5}}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ return-void
+ :error_{fname}_start
+ move-exception v6
+ invoke-virtual {{v6,v4}}, Ljava/lang/Throwable;->printStackTrace(Ljava/io/PrintStream;)V
+ return-void
+.end method
+"""
+
+ NSME_RESULT_TEMPLATE = "{tree} threw NoSuchMethodError"
+ ICCE_RESULT_TEMPLATE = "{tree} threw IncompatibleClassChangeError"
+ AME_RESULT_TEMPLATE = "{tree} threw AbstractMethodError"
+ NORMAL_RESULT_TEMPLATE = "{tree} calls {result}"
+
+ def __init__(self, farg):
+ """
+ Initialize a test function for the given argument
+ """
+ self.farg = farg
+
+ def get_expected(self):
+ """
+ Get the expected output calling this function.
+ """
+ exp = self.farg.get_called()
+ if exp.is_empty():
+ return self.NSME_RESULT_TEMPLATE.format(tree = self.farg.get_tree())
+ elif exp.is_abstract():
+ return self.AME_RESULT_TEMPLATE.format(tree = self.farg.get_tree())
+ elif exp.is_conflict():
+ return self.ICCE_RESULT_TEMPLATE.format(tree = self.farg.get_tree())
+ else:
+ assert exp.is_default()
+ return self.NORMAL_RESULT_TEMPLATE.format(tree = self.farg.get_tree(),
+ result = exp.get_tree())
+
+ def get_name(self):
+ """
+ Get the name of this function
+ """
+ return "TEST_FUNC_{}".format(self.farg.get_name())
+
+ def __str__(self):
+ """
+ Print the smali code of this function.
+ """
+ return self.TEST_FUNCTION_TEMPLATE.format(tree = self.farg.get_tree(),
+ fname = self.get_name(),
+ farg = self.farg.get_name())
+
+class TestClass(mixins.DumpMixin, mixins.Named, mixins.NameComparableMixin, mixins.SmaliFileMixin):
+ """
+ A class that will be instantiated to test default method resolution order.
+ """
+
+ TEST_CLASS_TEMPLATE = """{copyright}
+
+.class public L{class_name};
+.super Ljava/lang/Object;
+.implements L{iface_name};
+
+# public class {class_name} implements {iface_name} {{
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {{p0}}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+{funcs}
+
+# }}
+"""
+
+ def __init__(self, iface):
+ """
+ Initialize this test class which implements the given interface
+ """
+ self.iface = iface
+ self.class_name = "CLASS_"+gensym()
+
+ def get_name(self):
+ """
+ Get the name of this class
+ """
+ return self.class_name
+
+ def get_tree(self):
+ """
+ Print out a representation of the type tree of this class
+ """
+ return "[{class_name} {iface_tree}]".format(class_name = self.class_name,
+ iface_tree = self.iface.get_tree())
+
+ def __iter__(self):
+ """
+ Step through all interfaces implemented transitively by this class
+ """
+ yield self.iface
+ yield from self.iface
+
+ def get_called(self):
+ """
+ Returns the interface that will be called when the method on this class is invoked or
+ CONFLICT_TYPE if there is no interface that will be called.
+ """
+ return self.iface.get_called()
+
+ def __str__(self):
+ """
+ Print the smali code of this class.
+ """
+ return self.TEST_CLASS_TEMPLATE.format(copyright = get_copyright('smali'),
+ iface_name = self.iface.get_name(),
+ tree = self.get_tree(),
+ class_name = self.class_name,
+ funcs = "")
+
+class InterfaceType(Enum):
+ """
+ An enumeration of all the different types of interfaces we can have.
+
+ default: It has a default method
+ abstract: It has a method declared but not defined
+ empty: It does not have the method
+ """
+ default = 0
+ abstract = 1
+ empty = 2
+
+ def get_suffix(self):
+ if self == InterfaceType.default:
+ return "_DEFAULT"
+ elif self == InterfaceType.abstract:
+ return "_ABSTRACT"
+ elif self == InterfaceType.empty:
+ return "_EMPTY"
+ else:
+ raise TypeError("Interface type had illegal value.")
+
+class ConflictInterface:
+ """
+ A singleton representing a conflict of default methods.
+ """
+
+ def is_conflict(self):
+ """
+ Returns true if this is a conflict interface and calling the method on this interface will
+ result in an IncompatibleClassChangeError.
+ """
+ return True
+
+ def is_abstract(self):
+ """
+ Returns true if this is an abstract interface and calling the method on this interface will
+ result in an AbstractMethodError.
+ """
+ return False
+
+ def is_empty(self):
+ """
+ Returns true if this is an empty interface and calling the method on this interface will
+ result in a NoSuchMethodError.
+ """
+ return False
+
+ def is_default(self):
+ """
+ Returns true if this is a default interface and calling the method on this interface will
+ result in a method actually being called.
+ """
+ return False
+
+CONFLICT_TYPE = ConflictInterface()
+
+class TestInterface(mixins.DumpMixin, mixins.Named, mixins.NameComparableMixin, mixins.SmaliFileMixin):
+ """
+ An interface that will be used to test default method resolution order.
+ """
+
+ TEST_INTERFACE_TEMPLATE = """{copyright}
+.class public abstract interface L{class_name};
+.super Ljava/lang/Object;
+{implements_spec}
+
+# public interface {class_name} {extends} {ifaces} {{
+
+{funcs}
+
+# }}
+"""
+
+ DEFAULT_FUNC_TEMPLATE = """
+# public default String getName() {{
+# return "{tree}";
+# }}
+.method public getName()Ljava/lang/String;
+ .locals 1
+ const-string v0, "{tree}"
+ return-object v0
+.end method
+"""
+
+ ABSTRACT_FUNC_TEMPLATE = """
+# public String getName();
+.method public abstract getName()Ljava/lang/String;
+.end method
+"""
+
+ EMPTY_FUNC_TEMPLATE = """"""
+
+ IMPLEMENTS_TEMPLATE = """
+.implements L{iface_name};
+"""
+
+ def __init__(self, ifaces, iface_type, full_name = None):
+ """
+ Initialize interface with the given super-interfaces
+ """
+ self.ifaces = sorted(ifaces)
+ self.iface_type = iface_type
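+ # 'full_name' is set when get_specific_version() clones this interface as a
+ # different InterfaceType; the clone keeps the same class name so that already
+ # compiled references still resolve to it.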
+ if full_name is None:
+ end = self.iface_type.get_suffix()
+ self.class_name = "INTERFACE_"+gensym()+end
+ else:
+ self.class_name = full_name
+
+ def get_specific_version(self, v):
+ """
+ Returns a copy of this interface of the given type for use in partial compilation.
+ """
+ return TestInterface(self.ifaces, v, full_name = self.class_name)
+
+ def get_super_types(self):
+ """
+ Returns a set of all the supertypes of this interface
+ """
+ return set(i2 for i2 in self)
+
+ def is_conflict(self):
+ """
+ Returns true if this is a conflict interface and calling the method on this interface will
+ result in an IncompatibleClassChangeError.
+ """
+ return False
+
+ def is_abstract(self):
+ """
+ Returns true if this is an abstract interface and calling the method on this interface will
+ result in an AbstractMethodError.
+ """
+ return self.iface_type == InterfaceType.abstract
+
+ def is_empty(self):
+ """
+ Returns true if this is an empty interface and calling the method on this interface will
+ result in a NoSuchMethodError.
+ """
+ return self.iface_type == InterfaceType.empty
+
+ def is_default(self):
+ """
+ Returns true if this is a default interface and calling the method on this interface will
+ result in a method actually being called.
+ """
+ return self.iface_type == InterfaceType.default
+
+ def get_called(self):
+ """
+ Returns the interface that will be called when the method on this interface is invoked or
+ CONFLICT_TYPE if there is no interface that will be called.
+ """
+ if not self.is_empty() or len(self.ifaces) == 0:
+ return self
+ else:
+ best = self
+ for super_iface in self.ifaces:
+ super_best = super_iface.get_called()
+ if super_best.is_conflict():
+ return CONFLICT_TYPE
+ elif best.is_default():
+ if super_best.is_default():
+ return CONFLICT_TYPE
+ elif best.is_abstract():
+ if super_best.is_default():
+ best = super_best
+ else:
+ assert best.is_empty()
+ best = super_best
+ return best
+
+ def get_name(self):
+ """
+ Get the name of this class
+ """
+ return self.class_name
+
+ def get_tree(self):
+ """
+ Print out a representation of the type tree of this class
+ """
+ return "[{class_name} {iftree}]".format(class_name = self.get_name(),
+ iftree = print_tree(self.ifaces))
+
+ def __iter__(self):
+ """
+ Performs depth-first traversal of the interface tree this interface is the
+ root of. Does not filter out repeats.
+ """
+ for i in self.ifaces:
+ yield i
+ yield from i
+
+ def __str__(self):
+ """
+ Print the smali code of this interface.
+ """
+ s_ifaces = " "
+ j_ifaces = " "
+ for i in self.ifaces:
+ s_ifaces += self.IMPLEMENTS_TEMPLATE.format(iface_name = i.get_name())
+ j_ifaces += " {},".format(i.get_name())
+ j_ifaces = j_ifaces[0:-1]
+ if self.is_default():
+ funcs = self.DEFAULT_FUNC_TEMPLATE.format(tree = self.get_tree())
+ elif self.is_abstract():
+ funcs = self.ABSTRACT_FUNC_TEMPLATE.format()
+ else:
+ funcs = ""
+ return self.TEST_INTERFACE_TEMPLATE.format(copyright = get_copyright('smali'),
+ implements_spec = s_ifaces,
+ extends = "extends" if len(self.ifaces) else "",
+ ifaces = j_ifaces,
+ funcs = funcs,
+ tree = self.get_tree(),
+ class_name = self.class_name)
+
+def print_tree(ifaces):
+ """
+ Prints a list of iface trees
+ """
+ return " ".join(i.get_tree() for i in ifaces)
+
+# The deduplicated output of subtree_sizes for each size up to
+# MAX_IFACE_DEPTH.
+SUBTREES = [set(tuple(sorted(l)) for l in subtree_sizes(i))
+ for i in range(MAX_IFACE_DEPTH + 1)]
+
+def create_test_classes():
+ """
+ Yield all the test classes with the different interface trees
+ """
+ for num in range(1, MAX_IFACE_DEPTH + 1):
+ for iface in create_interface_trees(num):
+ yield TestClass(iface)
+
+def create_interface_trees(num):
+ """
+ Yield all the interface trees up to 'num' depth.
+ """
+ if num == 0:
+ for iftype in InterfaceType:
+ yield TestInterface(tuple(), iftype)
+ return
+ for split in SUBTREES[num]:
+ ifaces = []
+ for sub in split:
+ ifaces.append(list(create_interface_trees(sub)))
+ yield TestInterface(tuple(), InterfaceType.default)
+ for supers in itertools.product(*ifaces):
+ for iftype in InterfaceType:
+ if iftype == InterfaceType.default:
+ # We can just stop at defaults. Other tests already cover a default overriding an
+ # abstract, and skipping those cases here cuts the number of combinations down
+ # significantly, improving the speed of this test.
+ continue
+ yield TestInterface(supers, iftype)
+
+def create_all_test_files():
+ """
+ Creates all the objects representing the files in this test. They just need to
+ be dumped.
+ """
+ mc = MainClass()
+ classes = {mc}
+ for clazz in create_test_classes():
+ classes.add(clazz)
+ for i in clazz:
+ classes.add(i)
+ mc.add_test(clazz)
+ return mc, classes
+
+def main(argv):
+ smali_dir = Path(argv[1])
+ if not smali_dir.exists() or not smali_dir.is_dir():
+ print("{} is not a valid smali dir".format(smali_dir), file=sys.stderr)
+ sys.exit(1)
+ expected_txt = Path(argv[2])
+ mainclass, all_files = create_all_test_files()
+ with expected_txt.open('w') as out:
+ print(mainclass.get_expected(), file=out)
+ for f in all_files:
+ f.dump(smali_dir)
+
+if __name__ == '__main__':
+ main(sys.argv)
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 8744674a30..8bd5f9fb75 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -226,6 +226,7 @@ TEST_ART_PYTHON3_DEPENDENCY_RUN_TESTS := \
960-default-smali \
961-default-iface-resolution-generated \
964-default-iface-init-generated \
+ 968-default-partial-compile-generated
# Check if we have python3 to run our tests.
ifeq ($(wildcard /usr/bin/python3),)
@@ -304,12 +305,13 @@ TEST_ART_BROKEN_INTERPRETER_ACCESS_CHECK_TESTS :=
# Tests that are broken with GC stress.
# * 137-cfi needs to unwind a second forked process. We're using a primitive sleep to wait till we
# hope the second process got into the expected state. The slowness of gcstress makes this bad.
-# * 961-default-iface-resolution-generated is a very long test that often will take more than the
-# timeout to run when gcstress is enabled. This is because gcstress slows down allocations
-# significantly which this test does a lot.
+# * 961-default-iface-resolution-generated and 964-default-iface-init-generated are very long tests
+# that often will take more than the timeout to run when gcstress is enabled. This is because
+# gcstress slows down allocations significantly which these tests do a lot.
TEST_ART_BROKEN_GCSTRESS_RUN_TESTS := \
137-cfi \
- 961-default-iface-resolution-generated
+ 961-default-iface-resolution-generated \
+ 964-default-iface-init-generated
ifneq (,$(filter gcstress,$(GC_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
@@ -495,16 +497,20 @@ endif
TEST_ART_BROKEN_OPTIMIZING_DEBUGGABLE_RUN_TESTS :=
# Tests that should fail in the read barrier configuration.
+# 055: Exceeds run time limits due to read barrier instrumentation.
# 137: Read barrier forces interpreter. Cannot run this with the interpreter.
-# 141: Class unloading test is flaky with CC since CC seems to occasionally keep class loaders live.
+# 537: Expects an array copy to be intrinsified, but calling-on-slowpath intrinsics are not yet
+# handled in the read barrier configuration.
TEST_ART_BROKEN_READ_BARRIER_RUN_TESTS := \
- 137-cfi \
- 141-class-unload
+ 055-enum-performance \
+ 137-cfi \
+ 537-checker-arraycopy
ifeq ($(ART_USE_READ_BARRIER),true)
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
- $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),$(TEST_ART_BROKEN_READ_BARRIER_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES), \
+ $(PREBUILD_TYPES),$(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES), \
+ $(JNI_TYPES),$(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), \
+ $(TEST_ART_BROKEN_READ_BARRIER_RUN_TESTS),$(ALL_ADDRESS_SIZES))
endif
TEST_ART_BROKEN_READ_BARRIER_RUN_TESTS :=
diff --git a/test/dexdump/run-all-tests b/test/dexdump/run-all-tests
index 9cf7ab6cea..11ab55ae30 100755
--- a/test/dexdump/run-all-tests
+++ b/test/dexdump/run-all-tests
@@ -40,7 +40,7 @@ mkdir ${tmpdir}
# Set up dexdump binary and flags to test.
DEXD="${ANDROID_HOST_OUT}/bin/dexdump2"
DEXDFLAGS1="-dfh"
-DEXDFLAGS2="-l xml"
+DEXDFLAGS2="-e -l xml"
# Set up dexlist binary and flags to test.
DEXL="${ANDROID_HOST_OUT}/bin/dexlist"
diff --git a/test/run-test b/test/run-test
index 9b0261ed31..10ec3103b9 100755
--- a/test/run-test
+++ b/test/run-test
@@ -665,6 +665,15 @@ chmod 755 "$check_cmd"
export TEST_NAME=`basename ${test_dir}`
+# arch_supports_read_barrier ARCH
+# -------------------------------
+# Return whether the Optimizing compiler has read barrier support for ARCH.
+function arch_supports_read_barrier() {
+ # Optimizing has read barrier support for x86 and x86-64 at the
+ # moment.
+ [ "x$1" = xx86 ] || [ "x$1" = xx86_64 ]
+}
+
# Tests named '<number>-checker-*' will also have their CFGs verified with
# Checker when compiled with Optimizing on host.
if [[ "$TEST_NAME" =~ ^[0-9]+-checker- ]]; then
@@ -678,9 +687,24 @@ if [[ "$TEST_NAME" =~ ^[0-9]+-checker- ]]; then
USE_JACK="false"
if [ "$runtime" = "art" -a "$image_suffix" = "-optimizing" ]; then
+ # Optimizing has read barrier support for certain architectures
+ # only. On other architectures, compiling is disabled when read
+ # barriers are enabled, meaning that we do not produce a CFG file
+ # as a side-effect of compilation, thus the Checker assertions
+ # cannot be checked. Disable Checker for those cases.
+ #
+ # TODO: Enable Checker when read barrier support is added to more
+ # architectures (b/12687968).
+ if [ "x$ART_USE_READ_BARRIER" = xtrue ] \
+ && (([ "x$host_mode" = "xyes" ] \
+ && ! arch_supports_read_barrier "$host_arch_name") \
+ || ([ "x$target_mode" = "xyes" ] \
+ && ! arch_supports_read_barrier "$target_arch_name")); then
+ run_checker="no"
# In no-prebuild mode, the compiler is only invoked if both dex2oat and
# patchoat are available. Disable Checker otherwise (b/22552692).
- if [ "$prebuild_mode" = "yes" ] || [ "$have_patchoat" = "yes" -a "$have_dex2oat" = "yes" ]; then
+ elif [ "$prebuild_mode" = "yes" ] \
+ || [ "$have_patchoat" = "yes" -a "$have_dex2oat" = "yes" ]; then
run_checker="yes"
if [ "$target_mode" = "no" ]; then
@@ -708,10 +732,7 @@ fi
# To cause tests to fail fast, limit the file sizes created by dx, dex2oat and ART output to 2MB.
build_file_size_limit=2048
run_file_size_limit=2048
-if echo "$test_dir" | grep 089; then
- build_file_size_limit=5120
- run_file_size_limit=5120
-elif echo "$test_dir" | grep 083; then
+if echo "$test_dir" | grep -Eq "(083|089|964)" > /dev/null; then
build_file_size_limit=5120
run_file_size_limit=5120
fi
diff --git a/test/utils/python/testgen/mixins.py b/test/utils/python/testgen/mixins.py
index 085e51def2..aa8943baf3 100644
--- a/test/utils/python/testgen/mixins.py
+++ b/test/utils/python/testgen/mixins.py
@@ -79,6 +79,12 @@ class SmaliFileMixin(get_file_extension_mixin(".smali")):
"""
pass
+class JavaFileMixin(get_file_extension_mixin(".java")):
+ """
+ A mixin that defines that the file this class belongs to is get_name() + ".java".
+ """
+ pass
+
class NameComparableMixin(object):
"""
A mixin that defines the object comparison and related functionality in terms