-rw-r--r--  build/Android.gtest.mk                           |   1
-rw-r--r--  compiler/Android.mk                              |   1
-rw-r--r--  compiler/driver/compiler_driver-inl.h            |   4
-rw-r--r--  compiler/driver/compiler_driver.cc               |   8
-rw-r--r--  compiler/optimizing/code_generator_x86.cc        | 240
-rw-r--r--  compiler/optimizing/code_generator_x86.h         |   7
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc     | 160
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h      |   7
-rw-r--r--  compiler/optimizing/constant_area_fixups_x86.cc  | 132
-rw-r--r--  compiler/optimizing/nodes.cc                     |  19
-rw-r--r--  compiler/optimizing/nodes.h                      |   3
-rw-r--r--  compiler/optimizing/nodes_x86.h                  |  39
-rw-r--r--  compiler/utils/x86/assembler_x86.cc              |  24
-rw-r--r--  compiler/utils/x86/assembler_x86.h               | 103
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.cc        |  21
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.h         |  69
-rw-r--r--  dex2oat/dex2oat.cc                               |   2
-rw-r--r--  oatdump/oatdump.cc                               |   9
-rw-r--r--  runtime/class_linker.cc                          | 136
-rw-r--r--  runtime/class_linker.h                           |  12
-rw-r--r--  runtime/jvalue.h                                 |   8
-rw-r--r--  runtime/mirror/dex_cache_test.cc                 |  25
-rw-r--r--  runtime/monitor.cc                               |   4
-rw-r--r--  runtime/monitor.h                                |   4
-rw-r--r--  runtime/monitor_android.cc                       |   4
-rw-r--r--  runtime/monitor_linux.cc                         |   2
-rw-r--r--  runtime/native/dalvik_system_DexFile.cc          |   9
-rw-r--r--  runtime/native/dalvik_system_VMRuntime.cc        |   3
-rw-r--r--  runtime/oat_file_manager.cc                      |  30
-rw-r--r--  runtime/verifier/register_line.cc                | 101
-rw-r--r--  test/088-monitor-verification/src/TwoPath.java   |   2
-rw-r--r--  test/131-structural-change/expected.txt          |   1
-rw-r--r--  test/131-structural-change/src/Main.java         |   6
-rw-r--r--  test/Android.run-test.mk                         |  27
34 files changed, 856 insertions(+), 367 deletions(-)
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 70c9dc1cd3..1b54a510fd 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -65,6 +65,7 @@ $(ART_TEST_TARGET_GTEST_MainStripped_DEX): $(ART_TEST_TARGET_GTEST_Main_DEX)
# Dex file dependencies for each gtest.
ART_GTEST_class_linker_test_DEX_DEPS := Interfaces MultiDex MyClass Nested Statics StaticsFromCode
ART_GTEST_compiler_driver_test_DEX_DEPS := AbstractMethod StaticLeafMethods
+ART_GTEST_dex_cache_test_DEX_DEPS := Main
ART_GTEST_dex_file_test_DEX_DEPS := GetMethodSignature Main Nested
ART_GTEST_exception_test_DEX_DEPS := ExceptionHandle
ART_GTEST_instrumentation_test_DEX_DEPS := Instrumentation
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 8e3b5550fb..20c80235ba 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -66,6 +66,7 @@ LIBART_COMPILER_SRC_FILES := \
optimizing/builder.cc \
optimizing/code_generator.cc \
optimizing/code_generator_utils.cc \
+ optimizing/constant_area_fixups_x86.cc \
optimizing/constant_folding.cc \
optimizing/dead_code_elimination.cc \
optimizing/graph_checker.cc \
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index e535afd272..1a7dbe3a9f 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -370,7 +370,9 @@ inline int CompilerDriver::IsFastInvoke(
nullptr, kVirtual);
} else {
StackHandleScope<1> hs(soa.Self());
- auto target_dex_cache(hs.NewHandle(class_linker->RegisterDexFile(*devirt_target->dex_file)));
+ auto target_dex_cache(hs.NewHandle(class_linker->RegisterDexFile(
+ *devirt_target->dex_file,
+ class_linker->GetOrCreateAllocatorForClassLoader(class_loader.Get()))));
called_method = class_linker->ResolveMethod(
*devirt_target->dex_file, devirt_target->dex_method_index, target_dex_cache,
class_loader, nullptr, kVirtual);
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 74f19a1029..8324bf30d6 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -953,7 +953,9 @@ void CompilerDriver::LoadImageClasses(TimingLogger* timings) {
uint16_t exception_type_idx = exception_type.first;
const DexFile* dex_file = exception_type.second;
StackHandleScope<2> hs2(self);
- Handle<mirror::DexCache> dex_cache(hs2.NewHandle(class_linker->RegisterDexFile(*dex_file)));
+ Handle<mirror::DexCache> dex_cache(hs2.NewHandle(class_linker->RegisterDexFile(
+ *dex_file,
+ Runtime::Current()->GetLinearAlloc())));
Handle<mirror::Class> klass(hs2.NewHandle(
class_linker->ResolveType(*dex_file, exception_type_idx, dex_cache,
NullHandle<mirror::ClassLoader>())));
@@ -2010,9 +2012,11 @@ class ResolveTypeVisitor : public CompilationVisitor {
ClassLinker* class_linker = manager_->GetClassLinker();
const DexFile& dex_file = *manager_->GetDexFile();
StackHandleScope<2> hs(soa.Self());
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->RegisterDexFile(dex_file)));
Handle<mirror::ClassLoader> class_loader(
hs.NewHandle(soa.Decode<mirror::ClassLoader*>(manager_->GetClassLoader())));
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->RegisterDexFile(
+ dex_file,
+ class_linker->GetOrCreateAllocatorForClassLoader(class_loader.Get()))));
mirror::Class* klass = class_linker->ResolveType(dex_file, type_idx, dex_cache, class_loader);
if (klass == nullptr) {
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index f8be21a06e..0ec3780290 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -521,7 +521,8 @@ CodeGeneratorX86::CodeGeneratorX86(HGraph* graph,
move_resolver_(graph->GetArena(), this),
isa_features_(isa_features),
method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+ relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ fixups_to_jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
// Use a fake return address register to mimic Quick.
AddAllocatedRegister(Location::RegisterLocation(kFakeReturnRegister));
}
@@ -5669,6 +5670,51 @@ void InstructionCodeGeneratorX86::VisitPackedSwitch(HPackedSwitch* switch_instr)
}
}
+void LocationsBuilderX86::VisitX86PackedSwitch(HX86PackedSwitch* switch_instr) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+
+ // Constant area pointer.
+ locations->SetInAt(1, Location::RequiresRegister());
+
+ // And the temporary we need.
+ locations->AddTemp(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorX86::VisitX86PackedSwitch(HX86PackedSwitch* switch_instr) {
+ int32_t lower_bound = switch_instr->GetStartValue();
+ int32_t num_entries = switch_instr->GetNumEntries();
+ LocationSummary* locations = switch_instr->GetLocations();
+ Register value_reg = locations->InAt(0).AsRegister<Register>();
+ HBasicBlock* default_block = switch_instr->GetDefaultBlock();
+
+ // Optimizing has a jump area.
+ Register temp_reg = locations->GetTemp(0).AsRegister<Register>();
+ Register constant_area = locations->InAt(1).AsRegister<Register>();
+
+ // Remove the bias, if needed.
+ if (lower_bound != 0) {
+ __ leal(temp_reg, Address(value_reg, -lower_bound));
+ value_reg = temp_reg;
+ }
+
+ // Is the value in range?
+ DCHECK_GE(num_entries, 1);
+ __ cmpl(value_reg, Immediate(num_entries - 1));
+ __ j(kAbove, codegen_->GetLabelOf(default_block));
+
+ // We are in the range of the table.
+ // Load (target-constant_area) from the jump table, indexing by the value.
+ __ movl(temp_reg, codegen_->LiteralCaseTable(switch_instr, constant_area, value_reg));
+
+ // Compute the actual target address by adding in constant_area.
+ __ addl(temp_reg, constant_area);
+
+ // And jump.
+ __ jmp(temp_reg);
+}
+
void LocationsBuilderX86::VisitX86ComputeBaseMethodAddress(
HX86ComputeBaseMethodAddress* insn) {
LocationSummary* locations =
@@ -5752,28 +5798,18 @@ void InstructionCodeGeneratorX86::VisitX86LoadFromConstantTable(HX86LoadFromCons
}
}
-void CodeGeneratorX86::Finalize(CodeAllocator* allocator) {
- // Generate the constant area if needed.
- X86Assembler* assembler = GetAssembler();
- if (!assembler->IsConstantAreaEmpty()) {
- // Align to 4 byte boundary to reduce cache misses, as the data is 4 and 8
- // byte values.
- assembler->Align(4, 0);
- constant_area_start_ = assembler->CodeSize();
- assembler->AddConstantArea();
- }
-
- // And finish up.
- CodeGenerator::Finalize(allocator);
-}
-
/**
* Class to handle late fixup of offsets into constant area.
*/
class RIPFixup : public AssemblerFixup, public ArenaObject<kArenaAllocCodeGenerator> {
public:
- RIPFixup(const CodeGeneratorX86& codegen, int offset)
- : codegen_(codegen), offset_into_constant_area_(offset) {}
+ RIPFixup(CodeGeneratorX86& codegen, size_t offset)
+ : codegen_(&codegen), offset_into_constant_area_(offset) {}
+
+ protected:
+ void SetOffset(size_t offset) { offset_into_constant_area_ = offset; }
+
+ CodeGeneratorX86* codegen_;
private:
void Process(const MemoryRegion& region, int pos) OVERRIDE {
@@ -5781,19 +5817,77 @@ class RIPFixup : public AssemblerFixup, public ArenaObject<kArenaAllocCodeGenera
// last 4 bytes of the instruction.
// The value to patch is the distance from the offset in the constant area
// from the address computed by the HX86ComputeBaseMethodAddress instruction.
- int32_t constant_offset = codegen_.ConstantAreaStart() + offset_into_constant_area_;
- int32_t relative_position = constant_offset - codegen_.GetMethodAddressOffset();;
+ int32_t constant_offset = codegen_->ConstantAreaStart() + offset_into_constant_area_;
+ int32_t relative_position = constant_offset - codegen_->GetMethodAddressOffset();
// Patch in the right value.
region.StoreUnaligned<int32_t>(pos - 4, relative_position);
}
- const CodeGeneratorX86& codegen_;
-
// Location in constant area that the fixup refers to.
- int offset_into_constant_area_;
+ int32_t offset_into_constant_area_;
+};
+
+/**
+ * Class to handle late fixup of offsets to a jump table that will be created in the
+ * constant area.
+ */
+class JumpTableRIPFixup : public RIPFixup {
+ public:
+ JumpTableRIPFixup(CodeGeneratorX86& codegen, HX86PackedSwitch* switch_instr)
+ : RIPFixup(codegen, static_cast<size_t>(-1)), switch_instr_(switch_instr) {}
+
+ void CreateJumpTable() {
+ X86Assembler* assembler = codegen_->GetAssembler();
+
+ // Ensure that the reference to the jump table has the correct offset.
+ const int32_t offset_in_constant_table = assembler->ConstantAreaSize();
+ SetOffset(offset_in_constant_table);
+
+ // The label values in the jump table are computed relative to the
+ // instruction addressing the constant area.
+ const int32_t relative_offset = codegen_->GetMethodAddressOffset();
+
+ // Populate the jump table with the target offsets.
+ int32_t num_entries = switch_instr_->GetNumEntries();
+ HBasicBlock* block = switch_instr_->GetBlock();
+ const ArenaVector<HBasicBlock*>& successors = block->GetSuccessors();
+ // The value that we want is the target offset - the position of the table.
+ for (int32_t i = 0; i < num_entries; i++) {
+ HBasicBlock* b = successors[i];
+ Label* l = codegen_->GetLabelOf(b);
+ DCHECK(l->IsBound());
+ int32_t offset_to_block = l->Position() - relative_offset;
+ assembler->AppendInt32(offset_to_block);
+ }
+ }
+
+ private:
+ const HX86PackedSwitch* switch_instr_;
};
+void CodeGeneratorX86::Finalize(CodeAllocator* allocator) {
+ // Generate the constant area if needed.
+ X86Assembler* assembler = GetAssembler();
+ if (!assembler->IsConstantAreaEmpty() || !fixups_to_jump_tables_.empty()) {
+ // Align to 4 byte boundary to reduce cache misses, as the data is 4 and 8
+ // byte values.
+ assembler->Align(4, 0);
+ constant_area_start_ = assembler->CodeSize();
+
+ // Populate any jump tables.
+ for (auto jump_table : fixups_to_jump_tables_) {
+ jump_table->CreateJumpTable();
+ }
+
+ // And now add the constant area to the generated code.
+ assembler->AddConstantArea();
+ }
+
+ // And finish up.
+ CodeGenerator::Finalize(allocator);
+}
+
Address CodeGeneratorX86::LiteralDoubleAddress(double v, Register reg) {
AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, __ AddDouble(v));
return Address(reg, kDummy32BitOffset, fixup);
@@ -5814,98 +5908,18 @@ Address CodeGeneratorX86::LiteralInt64Address(int64_t v, Register reg) {
return Address(reg, kDummy32BitOffset, fixup);
}
-/**
- * Finds instructions that need the constant area base as an input.
- */
-class ConstantHandlerVisitor : public HGraphVisitor {
- public:
- explicit ConstantHandlerVisitor(HGraph* graph) : HGraphVisitor(graph), base_(nullptr) {}
-
- private:
- void VisitAdd(HAdd* add) OVERRIDE {
- BinaryFP(add);
- }
-
- void VisitSub(HSub* sub) OVERRIDE {
- BinaryFP(sub);
- }
-
- void VisitMul(HMul* mul) OVERRIDE {
- BinaryFP(mul);
- }
-
- void VisitDiv(HDiv* div) OVERRIDE {
- BinaryFP(div);
- }
-
- void VisitReturn(HReturn* ret) OVERRIDE {
- HConstant* value = ret->InputAt(0)->AsConstant();
- if ((value != nullptr && Primitive::IsFloatingPointType(value->GetType()))) {
- ReplaceInput(ret, value, 0, true);
- }
- }
-
- void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
- HandleInvoke(invoke);
- }
-
- void VisitInvokeVirtual(HInvokeVirtual* invoke) OVERRIDE {
- HandleInvoke(invoke);
- }
-
- void VisitInvokeInterface(HInvokeInterface* invoke) OVERRIDE {
- HandleInvoke(invoke);
- }
-
- void BinaryFP(HBinaryOperation* bin) {
- HConstant* rhs = bin->InputAt(1)->AsConstant();
- if (rhs != nullptr && Primitive::IsFloatingPointType(bin->GetResultType())) {
- ReplaceInput(bin, rhs, 1, false);
- }
- }
-
- void InitializeConstantAreaPointer(HInstruction* user) {
- // Ensure we only initialize the pointer once.
- if (base_ != nullptr) {
- return;
- }
+Address CodeGeneratorX86::LiteralCaseTable(HX86PackedSwitch* switch_instr,
+ Register reg,
+ Register value) {
+ // Create a fixup to be used to create and address the jump table.
+ JumpTableRIPFixup* table_fixup =
+ new (GetGraph()->GetArena()) JumpTableRIPFixup(*this, switch_instr);
- HGraph* graph = GetGraph();
- HBasicBlock* entry = graph->GetEntryBlock();
- base_ = new (graph->GetArena()) HX86ComputeBaseMethodAddress();
- HInstruction* insert_pos = (user->GetBlock() == entry) ? user : entry->GetLastInstruction();
- entry->InsertInstructionBefore(base_, insert_pos);
- DCHECK(base_ != nullptr);
- }
-
- void ReplaceInput(HInstruction* insn, HConstant* value, int input_index, bool materialize) {
- InitializeConstantAreaPointer(insn);
- HGraph* graph = GetGraph();
- HBasicBlock* block = insn->GetBlock();
- HX86LoadFromConstantTable* load_constant =
- new (graph->GetArena()) HX86LoadFromConstantTable(base_, value, materialize);
- block->InsertInstructionBefore(load_constant, insn);
- insn->ReplaceInput(load_constant, input_index);
- }
-
- void HandleInvoke(HInvoke* invoke) {
- // Ensure that we can load FP arguments from the constant area.
- for (size_t i = 0, e = invoke->InputCount(); i < e; i++) {
- HConstant* input = invoke->InputAt(i)->AsConstant();
- if (input != nullptr && Primitive::IsFloatingPointType(input->GetType())) {
- ReplaceInput(invoke, input, i, true);
- }
- }
- }
-
- // The generated HX86ComputeBaseMethodAddress in the entry block needed as an
- // input to the HX86LoadFromConstantTable instructions.
- HX86ComputeBaseMethodAddress* base_;
-};
+ // We have to populate the jump tables.
+ fixups_to_jump_tables_.push_back(table_fixup);
-void ConstantAreaFixups::Run() {
- ConstantHandlerVisitor visitor(graph_);
- visitor.VisitInsertionOrder();
+ // We want a scaled address, as we are extracting the correct offset from the table.
+ return Address(reg, value, TIMES_4, kDummy32BitOffset, table_fixup);
}
// TODO: target as memory.
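The new VisitX86PackedSwitch lowering above replaces the old compare-and-branch chain with a bounds check plus a table of 32-bit (target - constant_area) offsets that JumpTableRIPFixup::CreateJumpTable emits into the constant area. As a minimal standalone sketch of the same dispatch steps (plain C++, not ART code; the handler names, bias, and entry count are invented for illustration):

    #include <cstdint>
    #include <cstdio>

    // Hypothetical case handlers standing in for the basic-block targets.
    static int CaseA() { return 100; }
    static int CaseB() { return 200; }
    static int Default() { return -1; }

    int Dispatch(int32_t value) {
      constexpr int32_t kLowerBound = 10;  // switch bias, assumed for the example
      constexpr int32_t kNumEntries = 2;
      // The generated code stores (target - base) offsets in the constant area;
      // a table of handlers models "base + offset" here.
      static int (*const kTable[kNumEntries])() = {CaseA, CaseB};

      int32_t index = value - kLowerBound;  // leal: remove the bias
      if (static_cast<uint32_t>(index) >= static_cast<uint32_t>(kNumEntries)) {
        return Default();                   // cmpl + j(kAbove): out of range -> default
      }
      return kTable[index]();               // movl + addl + jmp: indirect jump
    }

    int main() {
      std::printf("%d %d %d\n", Dispatch(10), Dispatch(11), Dispatch(42));  // 100 200 -1
      return 0;
    }
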
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index ae2d84f945..fdfc5ab69b 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -245,6 +245,8 @@ class InstructionCodeGeneratorX86 : public HGraphVisitor {
DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorX86);
};
+class JumpTableRIPFixup;
+
class CodeGeneratorX86 : public CodeGenerator {
public:
CodeGeneratorX86(HGraph* graph,
@@ -385,6 +387,8 @@ class CodeGeneratorX86 : public CodeGenerator {
Address LiteralInt32Address(int32_t v, Register reg);
Address LiteralInt64Address(int64_t v, Register reg);
+ Address LiteralCaseTable(HX86PackedSwitch* switch_instr, Register reg, Register value);
+
void Finalize(CodeAllocator* allocator) OVERRIDE;
private:
@@ -405,6 +409,9 @@ class CodeGeneratorX86 : public CodeGenerator {
// Used for fixups to the constant area.
int32_t constant_area_start_;
+ // Fixups for jump tables that need to be patched after the constant table is generated.
+ ArenaVector<JumpTableRIPFixup*> fixups_to_jump_tables_;
+
// If there is a HX86ComputeBaseMethodAddress instruction in the graph
// (which shall be the sole instruction of this kind), subtracting this offset
// from the value contained in the out register of this HX86ComputeBaseMethodAddress
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 21120a0c80..f0d9420f87 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -670,7 +670,8 @@ CodeGeneratorX86_64::CodeGeneratorX86_64(HGraph* graph,
constant_area_start_(0),
method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_rel_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+ pc_rel_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ fixups_to_jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
AddAllocatedRegister(Location::RegisterLocation(kFakeReturnRegister));
}
@@ -5322,31 +5323,43 @@ void LocationsBuilderX86_64::VisitPackedSwitch(HPackedSwitch* switch_instr) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
}
void InstructionCodeGeneratorX86_64::VisitPackedSwitch(HPackedSwitch* switch_instr) {
int32_t lower_bound = switch_instr->GetStartValue();
int32_t num_entries = switch_instr->GetNumEntries();
LocationSummary* locations = switch_instr->GetLocations();
- CpuRegister value_reg = locations->InAt(0).AsRegister<CpuRegister>();
- HBasicBlock* default_block = switch_instr->GetDefaultBlock();
+ CpuRegister value_reg_in = locations->InAt(0).AsRegister<CpuRegister>();
+ CpuRegister temp_reg = locations->GetTemp(0).AsRegister<CpuRegister>();
+ CpuRegister base_reg = locations->GetTemp(1).AsRegister<CpuRegister>();
- // Create a series of compare/jumps.
- const ArenaVector<HBasicBlock*>& successors = switch_instr->GetBlock()->GetSuccessors();
- for (int i = 0; i < num_entries; i++) {
- int32_t case_value = lower_bound + i;
- if (case_value == 0) {
- __ testl(value_reg, value_reg);
- } else {
- __ cmpl(value_reg, Immediate(case_value));
- }
- __ j(kEqual, codegen_->GetLabelOf(successors[i]));
+ // Remove the bias, if needed.
+ Register value_reg_out = value_reg_in.AsRegister();
+ if (lower_bound != 0) {
+ __ leal(temp_reg, Address(value_reg_in, -lower_bound));
+ value_reg_out = temp_reg.AsRegister();
}
+ CpuRegister value_reg(value_reg_out);
- // And the default for any other value.
- if (!codegen_->GoesToNextBlock(switch_instr->GetBlock(), default_block)) {
- __ jmp(codegen_->GetLabelOf(default_block));
- }
+ // Is the value in range?
+ HBasicBlock* default_block = switch_instr->GetDefaultBlock();
+ __ cmpl(value_reg, Immediate(num_entries - 1));
+ __ j(kAbove, codegen_->GetLabelOf(default_block));
+
+ // We are in the range of the table.
+ // Load the address of the jump table in the constant area.
+ __ leaq(base_reg, codegen_->LiteralCaseTable(switch_instr));
+
+ // Load the (signed) offset from the jump table.
+ __ movsxd(temp_reg, Address(base_reg, value_reg, TIMES_4, 0));
+
+ // Add the offset to the address of the table base.
+ __ addq(temp_reg, base_reg);
+
+ // And jump.
+ __ jmp(temp_reg);
}
void CodeGeneratorX86_64::Load64BitValue(CpuRegister dest, int64_t value) {
@@ -5372,15 +5385,85 @@ void CodeGeneratorX86_64::Store64BitValueToStack(Location dest, int64_t value) {
}
}
+/**
+ * Class to handle late fixup of offsets into constant area.
+ */
+class RIPFixup : public AssemblerFixup, public ArenaObject<kArenaAllocCodeGenerator> {
+ public:
+ RIPFixup(CodeGeneratorX86_64& codegen, size_t offset)
+ : codegen_(&codegen), offset_into_constant_area_(offset) {}
+
+ protected:
+ void SetOffset(size_t offset) { offset_into_constant_area_ = offset; }
+
+ CodeGeneratorX86_64* codegen_;
+
+ private:
+ void Process(const MemoryRegion& region, int pos) OVERRIDE {
+ // Patch the correct offset for the instruction. We use the address of the
+ // 'next' instruction, which is 'pos' (patch the 4 bytes before).
+ int32_t constant_offset = codegen_->ConstantAreaStart() + offset_into_constant_area_;
+ int32_t relative_position = constant_offset - pos;
+
+ // Patch in the right value.
+ region.StoreUnaligned<int32_t>(pos - 4, relative_position);
+ }
+
+ // Location in constant area that the fixup refers to.
+ size_t offset_into_constant_area_;
+};
+
+/**
+ * Class to handle late fixup of offsets to a jump table that will be created in the
+ * constant area.
+ */
+class JumpTableRIPFixup : public RIPFixup {
+ public:
+ JumpTableRIPFixup(CodeGeneratorX86_64& codegen, HPackedSwitch* switch_instr)
+ : RIPFixup(codegen, -1), switch_instr_(switch_instr) {}
+
+ void CreateJumpTable() {
+ X86_64Assembler* assembler = codegen_->GetAssembler();
+
+ // Ensure that the reference to the jump table has the correct offset.
+ const int32_t offset_in_constant_table = assembler->ConstantAreaSize();
+ SetOffset(offset_in_constant_table);
+
+ // Compute the offset from the start of the function to this jump table.
+ const int32_t current_table_offset = assembler->CodeSize() + offset_in_constant_table;
+
+ // Populate the jump table with the target offsets.
+ int32_t num_entries = switch_instr_->GetNumEntries();
+ HBasicBlock* block = switch_instr_->GetBlock();
+ const ArenaVector<HBasicBlock*>& successors = block->GetSuccessors();
+ // The value that we want is the target offset - the position of the table.
+ for (int32_t i = 0; i < num_entries; i++) {
+ HBasicBlock* b = successors[i];
+ Label* l = codegen_->GetLabelOf(b);
+ DCHECK(l->IsBound());
+ int32_t offset_to_block = l->Position() - current_table_offset;
+ assembler->AppendInt32(offset_to_block);
+ }
+ }
+
+ private:
+ const HPackedSwitch* switch_instr_;
+};
+
void CodeGeneratorX86_64::Finalize(CodeAllocator* allocator) {
// Generate the constant area if needed.
X86_64Assembler* assembler = GetAssembler();
- if (!assembler->IsConstantAreaEmpty()) {
- // Align to 4 byte boundary to reduce cache misses, as the data is 4 and 8
- // byte values. If used for vectors at a later time, this will need to be
- // updated to 16 bytes with the appropriate offset.
+ if (!assembler->IsConstantAreaEmpty() || !fixups_to_jump_tables_.empty()) {
+ // Align to 4 byte boundary to reduce cache misses, as the data is 4 and 8 byte values.
assembler->Align(4, 0);
constant_area_start_ = assembler->CodeSize();
+
+ // Populate any jump tables.
+ for (auto jump_table : fixups_to_jump_tables_) {
+ jump_table->CreateJumpTable();
+ }
+
+ // And now add the constant area to the generated code.
assembler->AddConstantArea();
}
@@ -5388,31 +5471,6 @@ void CodeGeneratorX86_64::Finalize(CodeAllocator* allocator) {
CodeGenerator::Finalize(allocator);
}
-/**
- * Class to handle late fixup of offsets into constant area.
- */
-class RIPFixup : public AssemblerFixup, public ArenaObject<kArenaAllocCodeGenerator> {
- public:
- RIPFixup(const CodeGeneratorX86_64& codegen, int offset)
- : codegen_(codegen), offset_into_constant_area_(offset) {}
-
- private:
- void Process(const MemoryRegion& region, int pos) OVERRIDE {
- // Patch the correct offset for the instruction. We use the address of the
- // 'next' instruction, which is 'pos' (patch the 4 bytes before).
- int constant_offset = codegen_.ConstantAreaStart() + offset_into_constant_area_;
- int relative_position = constant_offset - pos;
-
- // Patch in the right value.
- region.StoreUnaligned<int32_t>(pos - 4, relative_position);
- }
-
- const CodeGeneratorX86_64& codegen_;
-
- // Location in constant area that the fixup refers to.
- int offset_into_constant_area_;
-};
-
Address CodeGeneratorX86_64::LiteralDoubleAddress(double v) {
AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, __ AddDouble(v));
return Address::RIP(fixup);
@@ -5453,6 +5511,16 @@ void CodeGeneratorX86_64::MoveFromReturnRegister(Location trg, Primitive::Type t
GetMoveResolver()->EmitNativeCode(&parallel_move);
}
+Address CodeGeneratorX86_64::LiteralCaseTable(HPackedSwitch* switch_instr) {
+ // Create a fixup to be used to create and address the jump table.
+ JumpTableRIPFixup* table_fixup =
+ new (GetGraph()->GetArena()) JumpTableRIPFixup(*this, switch_instr);
+
+ // We have to populate the jump tables.
+ fixups_to_jump_tables_.push_back(table_fixup);
+ return Address::RIP(table_fixup);
+}
+
#undef __
} // namespace x86_64
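Unlike the x86 version, the x86-64 lowering above addresses the table RIP-relative (leaq of the fixup address) and stores signed 32-bit (target - table) deltas, which is why the load uses movsxd before the addq: a case target emitted before the table yields a negative delta that must be sign-extended to 64 bits. A small sketch of that arithmetic, with made-up addresses:

    #include <cassert>
    #include <cstdint>

    // Resolve a jump-table entry: sign-extend the 32-bit delta (movsxd), then add the
    // 64-bit table address (addq). The addresses below are illustrative only.
    uint64_t ResolveTarget(uint64_t table_address, int32_t delta) {
      int64_t extended = static_cast<int64_t>(delta);
      return table_address + static_cast<uint64_t>(extended);
    }

    int main() {
      const uint64_t table = 0x10000;
      assert(ResolveTarget(table, -0x40) == 0xFFC0);   // target placed before the table
      assert(ResolveTarget(table, 0x80) == 0x10080);   // target placed after the table
      return 0;
    }
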
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index d6a6a7e760..dc86a48ce7 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -234,6 +234,9 @@ class InstructionCodeGeneratorX86_64 : public HGraphVisitor {
DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorX86_64);
};
+// Class for fixups to jump tables.
+class JumpTableRIPFixup;
+
class CodeGeneratorX86_64 : public CodeGenerator {
public:
CodeGeneratorX86_64(HGraph* graph,
@@ -354,6 +357,7 @@ class CodeGeneratorX86_64 : public CodeGenerator {
// Load a 64 bit value into a register in the most efficient manner.
void Load64BitValue(CpuRegister dest, int64_t value);
+ Address LiteralCaseTable(HPackedSwitch* switch_instr);
// Store a 64 bit value into a DoubleStackSlot in the most efficient manner.
void Store64BitValueToStack(Location dest, int64_t value);
@@ -391,6 +395,9 @@ class CodeGeneratorX86_64 : public CodeGenerator {
// We will fix this up in the linker later to have the right value.
static constexpr int32_t kDummy32BitOffset = 256;
+ // Fixups for jump tables need to be handled specially.
+ ArenaVector<JumpTableRIPFixup*> fixups_to_jump_tables_;
+
DISALLOW_COPY_AND_ASSIGN(CodeGeneratorX86_64);
};
diff --git a/compiler/optimizing/constant_area_fixups_x86.cc b/compiler/optimizing/constant_area_fixups_x86.cc
new file mode 100644
index 0000000000..c3470002c5
--- /dev/null
+++ b/compiler/optimizing/constant_area_fixups_x86.cc
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "constant_area_fixups_x86.h"
+
+namespace art {
+namespace x86 {
+
+/**
+ * Finds instructions that need the constant area base as an input.
+ */
+class ConstantHandlerVisitor : public HGraphVisitor {
+ public:
+ explicit ConstantHandlerVisitor(HGraph* graph) : HGraphVisitor(graph), base_(nullptr) {}
+
+ private:
+ void VisitAdd(HAdd* add) OVERRIDE {
+ BinaryFP(add);
+ }
+
+ void VisitSub(HSub* sub) OVERRIDE {
+ BinaryFP(sub);
+ }
+
+ void VisitMul(HMul* mul) OVERRIDE {
+ BinaryFP(mul);
+ }
+
+ void VisitDiv(HDiv* div) OVERRIDE {
+ BinaryFP(div);
+ }
+
+ void VisitReturn(HReturn* ret) OVERRIDE {
+ HConstant* value = ret->InputAt(0)->AsConstant();
+ if ((value != nullptr && Primitive::IsFloatingPointType(value->GetType()))) {
+ ReplaceInput(ret, value, 0, true);
+ }
+ }
+
+ void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
+ HandleInvoke(invoke);
+ }
+
+ void VisitInvokeVirtual(HInvokeVirtual* invoke) OVERRIDE {
+ HandleInvoke(invoke);
+ }
+
+ void VisitInvokeInterface(HInvokeInterface* invoke) OVERRIDE {
+ HandleInvoke(invoke);
+ }
+
+ void BinaryFP(HBinaryOperation* bin) {
+ HConstant* rhs = bin->InputAt(1)->AsConstant();
+ if (rhs != nullptr && Primitive::IsFloatingPointType(bin->GetResultType())) {
+ ReplaceInput(bin, rhs, 1, false);
+ }
+ }
+
+ void VisitPackedSwitch(HPackedSwitch* switch_insn) OVERRIDE {
+ // We need to replace the HPackedSwitch with a HX86PackedSwitch in order to
+ // address the constant area.
+ InitializeConstantAreaPointer(switch_insn);
+ HGraph* graph = GetGraph();
+ HBasicBlock* block = switch_insn->GetBlock();
+ HX86PackedSwitch* x86_switch = new (graph->GetArena()) HX86PackedSwitch(
+ switch_insn->GetStartValue(),
+ switch_insn->GetNumEntries(),
+ switch_insn->InputAt(0),
+ base_,
+ switch_insn->GetDexPc());
+ block->ReplaceAndRemoveInstructionWith(switch_insn, x86_switch);
+ }
+
+ void InitializeConstantAreaPointer(HInstruction* user) {
+ // Ensure we only initialize the pointer once.
+ if (base_ != nullptr) {
+ return;
+ }
+
+ HGraph* graph = GetGraph();
+ HBasicBlock* entry = graph->GetEntryBlock();
+ base_ = new (graph->GetArena()) HX86ComputeBaseMethodAddress();
+ HInstruction* insert_pos = (user->GetBlock() == entry) ? user : entry->GetLastInstruction();
+ entry->InsertInstructionBefore(base_, insert_pos);
+ DCHECK(base_ != nullptr);
+ }
+
+ void ReplaceInput(HInstruction* insn, HConstant* value, int input_index, bool materialize) {
+ InitializeConstantAreaPointer(insn);
+ HGraph* graph = GetGraph();
+ HBasicBlock* block = insn->GetBlock();
+ HX86LoadFromConstantTable* load_constant =
+ new (graph->GetArena()) HX86LoadFromConstantTable(base_, value, materialize);
+ block->InsertInstructionBefore(load_constant, insn);
+ insn->ReplaceInput(load_constant, input_index);
+ }
+
+ void HandleInvoke(HInvoke* invoke) {
+ // Ensure that we can load FP arguments from the constant area.
+ for (size_t i = 0, e = invoke->InputCount(); i < e; i++) {
+ HConstant* input = invoke->InputAt(i)->AsConstant();
+ if (input != nullptr && Primitive::IsFloatingPointType(input->GetType())) {
+ ReplaceInput(invoke, input, i, true);
+ }
+ }
+ }
+
+ // The generated HX86ComputeBaseMethodAddress in the entry block needed as an
+ // input to the HX86LoadFromConstantTable instructions.
+ HX86ComputeBaseMethodAddress* base_;
+};
+
+void ConstantAreaFixups::Run() {
+ ConstantHandlerVisitor visitor(graph_);
+ visitor.VisitInsertionOrder();
+}
+
+} // namespace x86
+} // namespace art
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 24a89bca4e..ed401b67c5 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -606,8 +606,23 @@ static void UpdateInputsUsers(HInstruction* instruction) {
void HBasicBlock::ReplaceAndRemoveInstructionWith(HInstruction* initial,
HInstruction* replacement) {
DCHECK(initial->GetBlock() == this);
- InsertInstructionBefore(replacement, initial);
- initial->ReplaceWith(replacement);
+ if (initial->IsControlFlow()) {
+ // We can only replace a control flow instruction with another control flow instruction.
+ DCHECK(replacement->IsControlFlow());
+ DCHECK_EQ(replacement->GetId(), -1);
+ DCHECK_EQ(replacement->GetType(), Primitive::kPrimVoid);
+ DCHECK_EQ(initial->GetBlock(), this);
+ DCHECK_EQ(initial->GetType(), Primitive::kPrimVoid);
+ DCHECK(initial->GetUses().IsEmpty());
+ DCHECK(initial->GetEnvUses().IsEmpty());
+ replacement->SetBlock(this);
+ replacement->SetId(GetGraph()->GetNextInstructionId());
+ instructions_.InsertInstructionBefore(replacement, initial);
+ UpdateInputsUsers(replacement);
+ } else {
+ InsertInstructionBefore(replacement, initial);
+ initial->ReplaceWith(replacement);
+ }
RemoveInstruction(initial);
}
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 82909c41b6..0d668e8cf7 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1074,7 +1074,8 @@ class HLoopInformationOutwardIterator : public ValueObject {
#define FOR_EACH_CONCRETE_INSTRUCTION_X86(M) \
M(X86ComputeBaseMethodAddress, Instruction) \
- M(X86LoadFromConstantTable, Instruction)
+ M(X86LoadFromConstantTable, Instruction) \
+ M(X86PackedSwitch, Instruction)
#define FOR_EACH_CONCRETE_INSTRUCTION_X86_64(M)
diff --git a/compiler/optimizing/nodes_x86.h b/compiler/optimizing/nodes_x86.h
index f7cc872419..556217bf74 100644
--- a/compiler/optimizing/nodes_x86.h
+++ b/compiler/optimizing/nodes_x86.h
@@ -62,6 +62,45 @@ class HX86LoadFromConstantTable : public HExpression<2> {
DISALLOW_COPY_AND_ASSIGN(HX86LoadFromConstantTable);
};
+// X86 version of HPackedSwitch that holds a pointer to the base method address.
+class HX86PackedSwitch : public HTemplateInstruction<2> {
+ public:
+ HX86PackedSwitch(int32_t start_value,
+ int32_t num_entries,
+ HInstruction* input,
+ HX86ComputeBaseMethodAddress* method_base,
+ uint32_t dex_pc)
+ : HTemplateInstruction(SideEffects::None(), dex_pc),
+ start_value_(start_value),
+ num_entries_(num_entries) {
+ SetRawInputAt(0, input);
+ SetRawInputAt(1, method_base);
+ }
+
+ bool IsControlFlow() const OVERRIDE { return true; }
+
+ int32_t GetStartValue() const { return start_value_; }
+
+ int32_t GetNumEntries() const { return num_entries_; }
+
+ HX86ComputeBaseMethodAddress* GetBaseMethodAddress() const {
+ return InputAt(1)->AsX86ComputeBaseMethodAddress();
+ }
+
+ HBasicBlock* GetDefaultBlock() const {
+ // Last entry is the default block.
+ return GetBlock()->GetSuccessors()[num_entries_];
+ }
+
+ DECLARE_INSTRUCTION(X86PackedSwitch);
+
+ private:
+ const int32_t start_value_;
+ const int32_t num_entries_;
+
+ DISALLOW_COPY_AND_ASSIGN(HX86PackedSwitch);
+};
+
} // namespace art
#endif // ART_COMPILER_OPTIMIZING_NODES_X86_H_
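HX86PackedSwitch relies on the block's successor ordering: the case targets occupy the first num_entries successor slots and the default block is the final one, which is what GetDefaultBlock() indexes and what CreateJumpTable iterates when filling the table. A toy illustration of that layout (placeholder names, not ART types):

    #include <cassert>
    #include <string>
    #include <vector>

    int main() {
      const int num_entries = 3;
      // Case targets first, default block last.
      std::vector<std::string> successors = {"case0", "case1", "case2", "default"};
      for (int i = 0; i < num_entries; ++i) {
        assert(successors[i] != "default");          // only case targets fill the jump table
      }
      assert(successors[num_entries] == "default");  // GetDefaultBlock() picks the last slot
      return 0;
    }
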
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 04e815aa1d..5347bf0302 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -2369,44 +2369,48 @@ void X86Assembler::AddConstantArea() {
}
}
-int ConstantArea::AddInt32(int32_t v) {
+size_t ConstantArea::AppendInt32(int32_t v) {
+ size_t result = buffer_.size() * elem_size_;
+ buffer_.push_back(v);
+ return result;
+}
+
+size_t ConstantArea::AddInt32(int32_t v) {
for (size_t i = 0, e = buffer_.size(); i < e; i++) {
if (v == buffer_[i]) {
- return i * kEntrySize;
+ return i * elem_size_;
}
}
// Didn't match anything.
- int result = buffer_.size() * kEntrySize;
- buffer_.push_back(v);
- return result;
+ return AppendInt32(v);
}
-int ConstantArea::AddInt64(int64_t v) {
+size_t ConstantArea::AddInt64(int64_t v) {
int32_t v_low = Low32Bits(v);
int32_t v_high = High32Bits(v);
if (buffer_.size() > 1) {
// Ensure we don't pass the end of the buffer.
for (size_t i = 0, e = buffer_.size() - 1; i < e; i++) {
if (v_low == buffer_[i] && v_high == buffer_[i + 1]) {
- return i * kEntrySize;
+ return i * elem_size_;
}
}
}
// Didn't match anything.
- int result = buffer_.size() * kEntrySize;
+ size_t result = buffer_.size() * elem_size_;
buffer_.push_back(v_low);
buffer_.push_back(v_high);
return result;
}
-int ConstantArea::AddDouble(double v) {
+size_t ConstantArea::AddDouble(double v) {
// Treat the value as a 64-bit integer value.
return AddInt64(bit_cast<int64_t, double>(v));
}
-int ConstantArea::AddFloat(float v) {
+size_t ConstantArea::AddFloat(float v) {
// Treat the value as a 32-bit integer value.
return AddInt32(bit_cast<int32_t, float>(v));
}
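The ConstantArea changes above split insertion into two policies: AddInt32 still deduplicates by scanning for an equal entry, while the new AppendInt32 always appends, since jump-table entries must occupy contiguous slots in emission order even when two targets share the same offset. A simplified sketch of those two policies (not the ART class; the names mirror the diff but the class itself is illustrative):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    class ConstantPool {
     public:
      // Deduplicating insert: reuse an existing equal entry if present.
      size_t AddInt32(int32_t v) {
        for (size_t i = 0; i < buffer_.size(); ++i) {
          if (buffer_[i] == v) return i * kElemSize;
        }
        return AppendInt32(v);
      }
      // Always appends a new slot, as required for jump-table entries.
      size_t AppendInt32(int32_t v) {
        size_t offset = buffer_.size() * kElemSize;
        buffer_.push_back(v);
        return offset;
      }
      size_t Size() const { return buffer_.size() * kElemSize; }

     private:
      static constexpr size_t kElemSize = sizeof(int32_t);
      std::vector<int32_t> buffer_;
    };

    int main() {
      ConstantPool pool;
      assert(pool.AddInt32(42) == 0);
      assert(pool.AddInt32(42) == 0);     // deduplicated: same offset returned
      assert(pool.AppendInt32(42) == 4);  // jump-table entry: forced into a new slot
      assert(pool.Size() == 8);
      return 0;
    }
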
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 93ecdf52fe..b50fda907a 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -166,6 +166,39 @@ class Address : public Operand {
Init(base_in, disp.Int32Value());
}
+ Address(Register index_in, ScaleFactor scale_in, int32_t disp) {
+ CHECK_NE(index_in, ESP); // Illegal addressing mode.
+ SetModRM(0, ESP);
+ SetSIB(scale_in, index_in, EBP);
+ SetDisp32(disp);
+ }
+
+ Address(Register base_in, Register index_in, ScaleFactor scale_in, int32_t disp) {
+ Init(base_in, index_in, scale_in, disp);
+ }
+
+ Address(Register base_in,
+ Register index_in,
+ ScaleFactor scale_in,
+ int32_t disp, AssemblerFixup *fixup) {
+ Init(base_in, index_in, scale_in, disp);
+ SetFixup(fixup);
+ }
+
+ static Address Absolute(uintptr_t addr) {
+ Address result;
+ result.SetModRM(0, EBP);
+ result.SetDisp32(addr);
+ return result;
+ }
+
+ static Address Absolute(ThreadOffset<4> addr) {
+ return Absolute(addr.Int32Value());
+ }
+
+ private:
+ Address() {}
+
void Init(Register base_in, int32_t disp) {
if (disp == 0 && base_in != EBP) {
SetModRM(0, base_in);
@@ -181,14 +214,7 @@ class Address : public Operand {
}
}
- Address(Register index_in, ScaleFactor scale_in, int32_t disp) {
- CHECK_NE(index_in, ESP); // Illegal addressing mode.
- SetModRM(0, ESP);
- SetSIB(scale_in, index_in, EBP);
- SetDisp32(disp);
- }
-
- Address(Register base_in, Register index_in, ScaleFactor scale_in, int32_t disp) {
+ void Init(Register base_in, Register index_in, ScaleFactor scale_in, int32_t disp) {
CHECK_NE(index_in, ESP); // Illegal addressing mode.
if (disp == 0 && base_in != EBP) {
SetModRM(0, ESP);
@@ -203,20 +229,6 @@ class Address : public Operand {
SetDisp32(disp);
}
}
-
- static Address Absolute(uintptr_t addr) {
- Address result;
- result.SetModRM(0, EBP);
- result.SetDisp32(addr);
- return result;
- }
-
- static Address Absolute(ThreadOffset<4> addr) {
- return Absolute(addr.Int32Value());
- }
-
- private:
- Address() {}
};
@@ -252,40 +264,39 @@ class ConstantArea {
// Add a double to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddDouble(double v);
+ size_t AddDouble(double v);
// Add a float to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddFloat(float v);
+ size_t AddFloat(float v);
// Add an int32_t to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddInt32(int32_t v);
+ size_t AddInt32(int32_t v);
+
+ // Add an int32_t to the end of the constant area, returning the offset into
+ // the constant area where the literal resides.
+ size_t AppendInt32(int32_t v);
// Add an int64_t to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddInt64(int64_t v);
+ size_t AddInt64(int64_t v);
bool IsEmpty() const {
return buffer_.size() == 0;
}
- const std::vector<int32_t>& GetBuffer() const {
- return buffer_;
- }
-
- void AddFixup(AssemblerFixup* fixup) {
- fixups_.push_back(fixup);
+ size_t GetSize() const {
+ return buffer_.size() * elem_size_;
}
- const std::vector<AssemblerFixup*>& GetFixups() const {
- return fixups_;
+ const std::vector<int32_t>& GetBuffer() const {
+ return buffer_;
}
private:
- static constexpr size_t kEntrySize = sizeof(int32_t);
+ static constexpr size_t elem_size_ = sizeof(int32_t);
std::vector<int32_t> buffer_;
- std::vector<AssemblerFixup*> fixups_;
};
class X86Assembler FINAL : public Assembler {
@@ -740,26 +751,36 @@ class X86Assembler FINAL : public Assembler {
// Add a double to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddDouble(double v) { return constant_area_.AddDouble(v); }
+ size_t AddDouble(double v) { return constant_area_.AddDouble(v); }
// Add a float to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddFloat(float v) { return constant_area_.AddFloat(v); }
+ size_t AddFloat(float v) { return constant_area_.AddFloat(v); }
// Add an int32_t to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddInt32(int32_t v) { return constant_area_.AddInt32(v); }
+ size_t AddInt32(int32_t v) {
+ return constant_area_.AddInt32(v);
+ }
+
+ // Add an int32_t to the end of the constant area, returning the offset into
+ // the constant area where the literal resides.
+ size_t AppendInt32(int32_t v) {
+ return constant_area_.AppendInt32(v);
+ }
// Add an int64_t to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddInt64(int64_t v) { return constant_area_.AddInt64(v); }
+ size_t AddInt64(int64_t v) { return constant_area_.AddInt64(v); }
// Add the contents of the constant area to the assembler buffer.
void AddConstantArea();
// Is the constant area empty? Return true if there are no literals in the constant area.
bool IsConstantAreaEmpty() const { return constant_area_.IsEmpty(); }
- void AddConstantAreaFixup(AssemblerFixup* fixup) { constant_area_.AddFixup(fixup); }
+
+ // Return the current size of the constant area.
+ size_t ConstantAreaSize() const { return constant_area_.GetSize(); }
private:
inline void EmitUint8(uint8_t value);
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 6e7d74d528..9eb5e67041 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -3122,7 +3122,14 @@ void X86_64Assembler::AddConstantArea() {
}
}
-int ConstantArea::AddInt32(int32_t v) {
+size_t ConstantArea::AppendInt32(int32_t v) {
+ size_t result = buffer_.size() * elem_size_;
+ buffer_.push_back(v);
+ return result;
+}
+
+size_t ConstantArea::AddInt32(int32_t v) {
+ // Look for an existing match.
for (size_t i = 0, e = buffer_.size(); i < e; i++) {
if (v == buffer_[i]) {
return i * elem_size_;
@@ -3130,12 +3137,10 @@ int ConstantArea::AddInt32(int32_t v) {
}
// Didn't match anything.
- int result = buffer_.size() * elem_size_;
- buffer_.push_back(v);
- return result;
+ return AppendInt32(v);
}
-int ConstantArea::AddInt64(int64_t v) {
+size_t ConstantArea::AddInt64(int64_t v) {
int32_t v_low = v;
int32_t v_high = v >> 32;
if (buffer_.size() > 1) {
@@ -3148,18 +3153,18 @@ int ConstantArea::AddInt64(int64_t v) {
}
// Didn't match anything.
- int result = buffer_.size() * elem_size_;
+ size_t result = buffer_.size() * elem_size_;
buffer_.push_back(v_low);
buffer_.push_back(v_high);
return result;
}
-int ConstantArea::AddDouble(double v) {
+size_t ConstantArea::AddDouble(double v) {
// Treat the value as a 64-bit integer value.
return AddInt64(bit_cast<int64_t, double>(v));
}
-int ConstantArea::AddFloat(float v) {
+size_t ConstantArea::AddFloat(float v) {
// Treat the value as a 32-bit integer value.
return AddInt32(bit_cast<int32_t, float>(v));
}
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 255f551675..01d28e305d 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -269,36 +269,40 @@ class Address : public Operand {
* Class to handle constant area values.
*/
class ConstantArea {
- public:
- ConstantArea() {}
+ public:
+ ConstantArea() {}
- // Add a double to the constant area, returning the offset into
- // the constant area where the literal resides.
- int AddDouble(double v);
+ // Add a double to the constant area, returning the offset into
+ // the constant area where the literal resides.
+ size_t AddDouble(double v);
- // Add a float to the constant area, returning the offset into
- // the constant area where the literal resides.
- int AddFloat(float v);
+ // Add a float to the constant area, returning the offset into
+ // the constant area where the literal resides.
+ size_t AddFloat(float v);
- // Add an int32_t to the constant area, returning the offset into
- // the constant area where the literal resides.
- int AddInt32(int32_t v);
+ // Add an int32_t to the constant area, returning the offset into
+ // the constant area where the literal resides.
+ size_t AddInt32(int32_t v);
- // Add an int64_t to the constant area, returning the offset into
- // the constant area where the literal resides.
- int AddInt64(int64_t v);
+ // Add an int32_t to the end of the constant area, returning the offset into
+ // the constant area where the literal resides.
+ size_t AppendInt32(int32_t v);
- int GetSize() const {
- return buffer_.size() * elem_size_;
- }
+ // Add an int64_t to the constant area, returning the offset into
+ // the constant area where the literal resides.
+ size_t AddInt64(int64_t v);
- const std::vector<int32_t>& GetBuffer() const {
- return buffer_;
- }
+ size_t GetSize() const {
+ return buffer_.size() * elem_size_;
+ }
- private:
- static constexpr size_t elem_size_ = sizeof(int32_t);
- std::vector<int32_t> buffer_;
+ const std::vector<int32_t>& GetBuffer() const {
+ return buffer_;
+ }
+
+ private:
+ static constexpr size_t elem_size_ = sizeof(int32_t);
+ std::vector<int32_t> buffer_;
};
@@ -806,19 +810,27 @@ class X86_64Assembler FINAL : public Assembler {
// Add a double to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddDouble(double v) { return constant_area_.AddDouble(v); }
+ size_t AddDouble(double v) { return constant_area_.AddDouble(v); }
// Add a float to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddFloat(float v) { return constant_area_.AddFloat(v); }
+ size_t AddFloat(float v) { return constant_area_.AddFloat(v); }
// Add an int32_t to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddInt32(int32_t v) { return constant_area_.AddInt32(v); }
+ size_t AddInt32(int32_t v) {
+ return constant_area_.AddInt32(v);
+ }
+
+ // Add an int32_t to the end of the constant area, returning the offset into
+ // the constant area where the literal resides.
+ size_t AppendInt32(int32_t v) {
+ return constant_area_.AppendInt32(v);
+ }
// Add an int64_t to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddInt64(int64_t v) { return constant_area_.AddInt64(v); }
+ size_t AddInt64(int64_t v) { return constant_area_.AddInt64(v); }
// Add the contents of the constant area to the assembler buffer.
void AddConstantArea();
@@ -826,6 +838,9 @@ class X86_64Assembler FINAL : public Assembler {
// Is the constant area empty? Return true if there are no literals in the constant area.
bool IsConstantAreaEmpty() const { return constant_area_.GetSize() == 0; }
+ // Return the current size of the constant area.
+ size_t ConstantAreaSize() const { return constant_area_.GetSize(); }
+
//
// Heap poisoning.
//
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 680e2d7b45..17c528209b 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1401,7 +1401,7 @@ class Dex2Oat FINAL {
}
ScopedObjectAccess soa(self);
dex_caches_.push_back(soa.AddLocalReference<jobject>(
- class_linker->RegisterDexFile(*dex_file)));
+ class_linker->RegisterDexFile(*dex_file, Runtime::Current()->GetLinearAlloc())));
}
// If we use a swap file, ensure we are above the threshold to make it necessary.
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index f5f7748835..dbf536575a 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -1420,8 +1420,10 @@ class OatDumper {
uint32_t method_access_flags) {
if ((method_access_flags & kAccNative) == 0) {
ScopedObjectAccess soa(Thread::Current());
+ Runtime* const runtime = Runtime::Current();
Handle<mirror::DexCache> dex_cache(
- hs->NewHandle(Runtime::Current()->GetClassLinker()->RegisterDexFile(*dex_file)));
+ hs->NewHandle(runtime->GetClassLinker()->RegisterDexFile(*dex_file,
+ runtime->GetLinearAlloc())));
DCHECK(options_.class_loader_ != nullptr);
return verifier::MethodVerifier::VerifyMethodAndDump(
soa.Self(), vios, dex_method_idx, dex_file, dex_cache, *options_.class_loader_,
@@ -2400,14 +2402,13 @@ static int DumpOatWithRuntime(Runtime* runtime, OatFile* oat_file, OatDumperOpti
// Need to register dex files to get a working dex cache.
ScopedObjectAccess soa(self);
ClassLinker* class_linker = runtime->GetClassLinker();
- Runtime::Current()->GetOatFileManager().RegisterOatFile(
- std::unique_ptr<const OatFile>(oat_file));
+ runtime->GetOatFileManager().RegisterOatFile(std::unique_ptr<const OatFile>(oat_file));
std::vector<const DexFile*> class_path;
for (const OatFile::OatDexFile* odf : oat_file->GetOatDexFiles()) {
std::string error_msg;
const DexFile* const dex_file = OpenDexFile(odf, &error_msg);
CHECK(dex_file != nullptr) << error_msg;
- class_linker->RegisterDexFile(*dex_file);
+ class_linker->RegisterDexFile(*dex_file, runtime->GetLinearAlloc());
class_path.push_back(dex_file);
}
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index c91577a7ac..4ce52f10f3 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1189,7 +1189,9 @@ mirror::PointerArray* ClassLinker::AllocPointerArray(Thread* self, size_t length
static_cast<mirror::Array*>(mirror::IntArray::Alloc(self, length)));
}
-mirror::DexCache* ClassLinker::AllocDexCache(Thread* self, const DexFile& dex_file) {
+mirror::DexCache* ClassLinker::AllocDexCache(Thread* self,
+ const DexFile& dex_file,
+ LinearAlloc* linear_alloc) {
StackHandleScope<6> hs(self);
auto dex_cache(hs.NewHandle(down_cast<mirror::DexCache*>(
GetClassRoot(kJavaLangDexCache)->AllocObject(self))));
@@ -1211,18 +1213,13 @@ mirror::DexCache* ClassLinker::AllocDexCache(Thread* self, const DexFile& dex_fi
dex_file.NumMethodIds() != 0u || dex_file.NumFieldIds() != 0u) {
// NOTE: We "leak" the raw_arrays because we never destroy the dex cache.
DCHECK(image_pointer_size_ == 4u || image_pointer_size_ == 8u);
- if (sizeof(void*) == 8u && image_pointer_size_ == 4u) {
- // When cross-compiling for a 32-bit target on a 64-bit host, we need these arrays
- // in the low 4GiB address space so that we can store pointers in 32-bit fields.
- // This is conveniently provided by the linear allocator.
- raw_arrays = reinterpret_cast<uint8_t*>(
- Runtime::Current()->GetLinearAlloc()->Alloc(self, layout.Size())); // Zero-initialized.
- } else {
- raw_arrays = reinterpret_cast<uint8_t*>(calloc(layout.Size(), 1u)); // Zero-initialized.
- if (raw_arrays == nullptr) {
- return nullptr;
- }
- }
+ // When cross-compiling for a 32-bit target on a 64-bit host, we need these arrays
+ // in the low 4GiB address space so that we can store pointers in 32-bit fields.
+ // This is conveniently provided by the linear allocator.
+ raw_arrays = reinterpret_cast<uint8_t*>(
+ (sizeof(void*) == 8u && image_pointer_size_ == 4u)
+ ? Runtime::Current()->GetLinearAlloc()->Alloc(self, layout.Size()) // Zero-initialized.
+ : linear_alloc->Alloc(self, layout.Size())); // Zero-initialized.
}
GcRoot<mirror::String>* strings = (dex_file.NumStringIds() == 0u) ? nullptr :
reinterpret_cast<GcRoot<mirror::String>*>(raw_arrays + layout.StringsOffset());
@@ -1593,7 +1590,9 @@ mirror::Class* ClassLinker::DefineClass(Thread* self,
self->AssertPendingOOMException();
return nullptr;
}
- mirror::DexCache* dex_cache = RegisterDexFile(dex_file);
+ mirror::DexCache* dex_cache = RegisterDexFile(
+ dex_file,
+ GetOrCreateAllocatorForClassLoader(class_loader.Get()));
if (dex_cache == nullptr) {
self->AssertPendingOOMException();
return nullptr;
@@ -2096,6 +2095,19 @@ LinearAlloc* ClassLinker::GetAllocatorForClassLoader(mirror::ClassLoader* class_
return allocator;
}
+LinearAlloc* ClassLinker::GetOrCreateAllocatorForClassLoader(mirror::ClassLoader* class_loader) {
+ if (class_loader == nullptr) {
+ return Runtime::Current()->GetLinearAlloc();
+ }
+ WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+ LinearAlloc* allocator = class_loader->GetAllocator();
+ if (allocator == nullptr) {
+ allocator = Runtime::Current()->CreateLinearAlloc();
+ class_loader->SetAllocator(allocator);
+ }
+ return allocator;
+}
+
void ClassLinker::LoadClassMembers(Thread* self,
const DexFile& dex_file,
const uint8_t* class_data,
@@ -2254,7 +2266,10 @@ void ClassLinker::LoadMethod(Thread* self,
void ClassLinker::AppendToBootClassPath(Thread* self, const DexFile& dex_file) {
StackHandleScope<1> hs(self);
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(AllocDexCache(self, dex_file)));
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(AllocDexCache(
+ self,
+ dex_file,
+ Runtime::Current()->GetLinearAlloc())));
CHECK(dex_cache.Get() != nullptr) << "Failed to allocate dex cache for "
<< dex_file.GetLocation();
AppendToBootClassPath(dex_file, dex_cache);
@@ -2290,7 +2305,7 @@ void ClassLinker::RegisterDexFileLocked(const DexFile& dex_file,
dex_cache->SetDexFile(&dex_file);
}
-mirror::DexCache* ClassLinker::RegisterDexFile(const DexFile& dex_file) {
+mirror::DexCache* ClassLinker::RegisterDexFile(const DexFile& dex_file, LinearAlloc* linear_alloc) {
Thread* self = Thread::Current();
{
ReaderMutexLock mu(self, dex_lock_);
@@ -2303,7 +2318,7 @@ mirror::DexCache* ClassLinker::RegisterDexFile(const DexFile& dex_file) {
// suspend all threads and another thread may need the dex_lock_ to
// get to a suspend point.
StackHandleScope<1> hs(self);
- Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(AllocDexCache(self, dex_file)));
+ Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(AllocDexCache(self, dex_file, linear_alloc)));
WriterMutexLock mu(self, dex_lock_);
mirror::DexCache* dex_cache = FindDexCacheLocked(self, dex_file, true);
if (dex_cache != nullptr) {
@@ -3100,6 +3115,9 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable&
std::string descriptor(GetDescriptorForProxy(klass.Get()));
const size_t hash = ComputeModifiedUtf8Hash(descriptor.c_str());
+ // Needs to be before we insert the class so that the allocator field is set.
+ LinearAlloc* const allocator = GetOrCreateAllocatorForClassLoader(klass->GetClassLoader());
+
// Insert the class before loading the fields as the field roots
// (ArtField::declaring_class_) are only visited from the class
// table. There can't be any suspend points between inserting the
@@ -3107,9 +3125,6 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable&
mirror::Class* existing = InsertClass(descriptor.c_str(), klass.Get(), hash);
CHECK(existing == nullptr);
- // Needs to be after we insert the class so that the allocator field is set.
- LinearAlloc* const allocator = GetAllocatorForClassLoader(klass->GetClassLoader());
-
// Instance fields are inherited, but we add a couple of static fields...
const size_t num_fields = 2;
LengthPrefixedArray<ArtField>* sfields = AllocArtFieldArray(self, allocator, num_fields);
@@ -3489,28 +3504,31 @@ bool ClassLinker::InitializeClass(Thread* self, Handle<mirror::Class> klass,
if (!klass->IsInterface()) {
// Initialize interfaces with default methods for the JLS.
size_t num_direct_interfaces = klass->NumDirectInterfaces();
- for (size_t i = 0; i < num_direct_interfaces; i++) {
+ // Only set up the (expensive) handle scope if we actually need to.
+ if (UNLIKELY(num_direct_interfaces > 0)) {
StackHandleScope<1> hs_iface(self);
- Handle<mirror::Class> handle_scope_iface(
- hs_iface.NewHandle(mirror::Class::GetDirectInterface(self, klass, i)));
- CHECK(handle_scope_iface.Get() != nullptr);
- CHECK(handle_scope_iface->IsInterface());
- if (handle_scope_iface->HasBeenRecursivelyInitialized()) {
- // We have already done this once for this interface. Skip it.
- continue;
- }
- // We cannot just call initialize class directly because we need to ensure that ALL interfaces
- // with default methods are initialized. Non-default interface initialization will not affect
- // other non-default super-interfaces.
- bool iface_initialized = InitializeDefaultInterfaceRecursive(self,
- handle_scope_iface,
- can_init_statics,
- can_init_parents);
- if (!iface_initialized) {
- ObjectLock<mirror::Class> lock(self, klass);
- // Initialization failed because one of our interfaces with default methods is erroneous.
- mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self);
- return false;
+ MutableHandle<mirror::Class> handle_scope_iface(hs_iface.NewHandle<mirror::Class>(nullptr));
+ for (size_t i = 0; i < num_direct_interfaces; i++) {
+ handle_scope_iface.Assign(mirror::Class::GetDirectInterface(self, klass, i));
+ CHECK(handle_scope_iface.Get() != nullptr);
+ CHECK(handle_scope_iface->IsInterface());
+ if (handle_scope_iface->HasBeenRecursivelyInitialized()) {
+ // We have already done this for this interface. Skip it.
+ continue;
+ }
+ // We cannot just call initialize class directly because we need to ensure that ALL
+ // interfaces with default methods are initialized. Non-default interface initialization
+ // will not affect other non-default super-interfaces.
+ bool iface_initialized = InitializeDefaultInterfaceRecursive(self,
+ handle_scope_iface,
+ can_init_statics,
+ can_init_parents);
+ if (!iface_initialized) {
+ ObjectLock<mirror::Class> lock(self, klass);
+ // Initialization failed because one of our interfaces with default methods is erroneous.
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self);
+ return false;
+ }
}
}
}
@@ -3612,18 +3630,22 @@ bool ClassLinker::InitializeDefaultInterfaceRecursive(Thread* self,
bool can_init_parents) {
CHECK(iface->IsInterface());
size_t num_direct_ifaces = iface->NumDirectInterfaces();
- // First we initialize all of iface's super-interfaces recursively.
- for (size_t i = 0; i < num_direct_ifaces; i++) {
- mirror::Class* super_iface = mirror::Class::GetDirectInterface(self, iface, i);
- if (!super_iface->HasBeenRecursivelyInitialized()) {
- // Recursive step
- StackHandleScope<1> hs(self);
- Handle<mirror::Class> handle_super_iface(hs.NewHandle(super_iface));
- if (!InitializeDefaultInterfaceRecursive(self,
- handle_super_iface,
- can_init_statics,
- can_init_parents)) {
- return false;
+ // Only create the (expensive) handle scope if we need it.
+ if (UNLIKELY(num_direct_ifaces > 0)) {
+ StackHandleScope<1> hs(self);
+ MutableHandle<mirror::Class> handle_super_iface(hs.NewHandle<mirror::Class>(nullptr));
+ // First we initialize all of iface's super-interfaces recursively.
+ for (size_t i = 0; i < num_direct_ifaces; i++) {
+ mirror::Class* super_iface = mirror::Class::GetDirectInterface(self, iface, i);
+ if (!super_iface->HasBeenRecursivelyInitialized()) {
+ // Recursive step
+ handle_super_iface.Assign(super_iface);
+ if (!InitializeDefaultInterfaceRecursive(self,
+ handle_super_iface,
+ can_init_statics,
+ can_init_parents)) {
+ return false;
+ }
}
}
}
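The two hunks above make the same change in InitializeClass and InitializeDefaultInterfaceRecursive: the StackHandleScope is hoisted out of the loop, a single MutableHandle is re-Assign()ed on each iteration, and the scope is skipped entirely when there are no direct interfaces. A standalone sketch of that shape, using invented stand-in types rather than ART's handle machinery:

    #include <cstdio>
    #include <vector>

    struct FakeScope {                       // stand-in for StackHandleScope<1>
      FakeScope() { std::puts("scope constructed"); }  // imagine this being the expensive part
    };

    int main() {
      std::vector<int> direct_interfaces = {10, 20, 30};
      if (!direct_interfaces.empty()) {      // only pay for the scope when there is work to do
        FakeScope scope;                     // constructed once, outside the loop
        const int* handle = nullptr;         // single slot, reassigned per iteration (Assign())
        for (const int& iface : direct_interfaces) {
          handle = &iface;
          std::printf("initializing interface %d\n", *handle);
        }
      }
      return 0;
    }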
@@ -3948,13 +3970,13 @@ ClassTable* ClassLinker::InsertClassTableForClassLoader(mirror::ClassLoader* cla
ClassLoaderData data;
data.weak_root = self->GetJniEnv()->vm->AddWeakGlobalRef(self, class_loader);
data.class_table = class_table;
- data.allocator = Runtime::Current()->CreateLinearAlloc();
- class_loaders_.push_back(data);
// Don't already have a class table, add it to the class loader.
CHECK(class_loader->GetClassTable() == nullptr);
- CHECK(class_loader->GetAllocator() == nullptr);
class_loader->SetClassTable(data.class_table);
- class_loader->SetAllocator(data.allocator);
+ // Should have been set when we registered the dex file.
+ data.allocator = class_loader->GetAllocator();
+ CHECK(data.allocator != nullptr);
+ class_loaders_.push_back(data);
}
return class_table;
}
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 93161f7bb7..a70967d49b 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -319,7 +319,7 @@ class ClassLinker {
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!dex_lock_, !Roles::uninterruptible_);
- mirror::DexCache* RegisterDexFile(const DexFile& dex_file)
+ mirror::DexCache* RegisterDexFile(const DexFile& dex_file, LinearAlloc* linear_alloc)
REQUIRES(!dex_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
void RegisterDexFile(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache)
@@ -532,6 +532,12 @@ class ClassLinker {
static LinearAlloc* GetAllocatorForClassLoader(mirror::ClassLoader* class_loader)
SHARED_REQUIRES(Locks::mutator_lock_);
+ // Return the linear alloc for a class loader if it is already allocated, otherwise allocate and
+ // set it. TODO: Consider using a lock other than classlinker_classes_lock_.
+ static LinearAlloc* GetOrCreateAllocatorForClassLoader(mirror::ClassLoader* class_loader)
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Locks::classlinker_classes_lock_);
+
private:
struct ClassLoaderData {
jweak weak_root; // Weak root to enable class unloading.
@@ -570,7 +576,9 @@ class ClassLinker {
mirror::Class* AllocClass(Thread* self, uint32_t class_size)
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
- mirror::DexCache* AllocDexCache(Thread* self, const DexFile& dex_file)
+ mirror::DexCache* AllocDexCache(Thread* self,
+ const DexFile& dex_file,
+ LinearAlloc* linear_alloc)
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
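With these declarations, RegisterDexFile and AllocDexCache take the class loader's LinearAlloc explicitly, and GetOrCreateAllocatorForClassLoader creates that allocator lazily under the class-linker lock the first time it is requested; the DexFile_defineClassNative change later in this patch passes exactly that. A minimal standalone sketch of the get-or-create shape, with std::mutex and std::unordered_map standing in for the class-linker lock and the per-class-loader field (this is not the ART implementation):

    #include <memory>
    #include <mutex>
    #include <unordered_map>

    struct LinearAllocStub {};               // stand-in for art::LinearAlloc
    using ClassLoaderKey = int;              // stand-in for mirror::ClassLoader*

    std::mutex g_lock;                       // stand-in for classlinker_classes_lock_
    std::unordered_map<ClassLoaderKey, std::unique_ptr<LinearAllocStub>> g_allocators;

    // Return the loader's allocator if one exists, otherwise create and record it.
    LinearAllocStub* GetOrCreateAllocatorForClassLoader(ClassLoaderKey loader) {
      std::lock_guard<std::mutex> guard(g_lock);
      std::unique_ptr<LinearAllocStub>& slot = g_allocators[loader];
      if (slot == nullptr) {
        slot.reset(new LinearAllocStub());
      }
      return slot.get();
    }

InsertClassTableForClassLoader can then assume the allocator was already set when the dex file was registered, which is what the CHECK in the class_linker.cc hunk above enforces.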
diff --git a/runtime/jvalue.h b/runtime/jvalue.h
index 6a6d1986dc..7b91b0b2b6 100644
--- a/runtime/jvalue.h
+++ b/runtime/jvalue.h
@@ -32,7 +32,7 @@ union PACKED(4) JValue {
int8_t GetB() const { return b; }
void SetB(int8_t new_b) {
- i = ((static_cast<int32_t>(new_b) << 24) >> 24); // Sign-extend.
+ j = ((static_cast<int64_t>(new_b) << 56) >> 56); // Sign-extend to 64 bits.
}
uint16_t GetC() const { return c; }
@@ -45,7 +45,9 @@ union PACKED(4) JValue {
void SetF(float new_f) { f = new_f; }
int32_t GetI() const { return i; }
- void SetI(int32_t new_i) { i = new_i; }
+ void SetI(int32_t new_i) {
+ j = ((static_cast<int64_t>(new_i) << 32) >> 32); // Sign-extend to 64 bits.
+ }
int64_t GetJ() const { return j; }
void SetJ(int64_t new_j) { j = new_j; }
@@ -55,7 +57,7 @@ union PACKED(4) JValue {
int16_t GetS() const { return s; }
void SetS(int16_t new_s) {
- i = ((static_cast<int32_t>(new_s) << 16) >> 16); // Sign-extend.
+ j = ((static_cast<int64_t>(new_s) << 48) >> 48); // Sign-extend to 64 bits.
}
uint8_t GetZ() const { return z; }
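The setter changes matter because JValue is a union: writing only the 32-bit member leaves whatever was previously stored in the upper half of the 64-bit member, so a wide read after a narrow write could observe stale high bits. A small standalone illustration with a simplified two-member union (not the runtime's JValue):

    #include <cstdint>
    #include <cstdio>

    union Value {            // simplified stand-in; JValue has more members
      int32_t i;
      int64_t j;
    };

    int main() {
      Value v;
      v.j = 0x1234567800000000LL;            // the slot previously held a wide value
      v.i = -1;                              // old-style SetI: only the low 32 bits are written
      // Reading j after writing i is the union aliasing JValue itself relies on.
      std::printf("stale:    0x%llx\n", static_cast<unsigned long long>(v.j));
      v.j = static_cast<int64_t>(int32_t{-1});  // new-style SetI: sign-extend across all 64 bits
      std::printf("extended: %lld\n", static_cast<long long>(v.j));  // prints -1
      return 0;
    }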
diff --git a/runtime/mirror/dex_cache_test.cc b/runtime/mirror/dex_cache_test.cc
index 8fb860fa6b..48f2ca59e8 100644
--- a/runtime/mirror/dex_cache_test.cc
+++ b/runtime/mirror/dex_cache_test.cc
@@ -20,9 +20,8 @@
#include "class_linker.h"
#include "common_runtime_test.h"
-#include "gc/heap.h"
-#include "mirror/object_array-inl.h"
-#include "mirror/object-inl.h"
+#include "linear_alloc.h"
+#include "mirror/class_loader-inl.h"
#include "handle_scope-inl.h"
#include "scoped_thread_state_change.h"
@@ -36,7 +35,9 @@ TEST_F(DexCacheTest, Open) {
StackHandleScope<1> hs(soa.Self());
ASSERT_TRUE(java_lang_dex_file_ != nullptr);
Handle<DexCache> dex_cache(
- hs.NewHandle(class_linker_->AllocDexCache(soa.Self(), *java_lang_dex_file_)));
+ hs.NewHandle(class_linker_->AllocDexCache(soa.Self(),
+ *java_lang_dex_file_,
+ Runtime::Current()->GetLinearAlloc())));
ASSERT_TRUE(dex_cache.Get() != nullptr);
EXPECT_EQ(java_lang_dex_file_->NumStringIds(), dex_cache->NumStrings());
@@ -45,5 +46,21 @@ TEST_F(DexCacheTest, Open) {
EXPECT_EQ(java_lang_dex_file_->NumFieldIds(), dex_cache->NumResolvedFields());
}
+TEST_F(DexCacheTest, LinearAlloc) {
+ ScopedObjectAccess soa(Thread::Current());
+ jobject jclass_loader(LoadDex("Main"));
+ ASSERT_TRUE(jclass_loader != nullptr);
+ Runtime* const runtime = Runtime::Current();
+ ClassLinker* const class_linker = runtime->GetClassLinker();
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
+ soa.Decode<mirror::ClassLoader*>(jclass_loader)));
+ mirror::Class* klass = class_linker->FindClass(soa.Self(), "LMain;", class_loader);
+ ASSERT_TRUE(klass != nullptr);
+ LinearAlloc* const linear_alloc = klass->GetClassLoader()->GetAllocator();
+ EXPECT_NE(linear_alloc, runtime->GetLinearAlloc());
+ EXPECT_TRUE(linear_alloc->Contains(klass->GetDexCache()->GetResolvedMethods()));
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 255a0f23d8..81e7e6d675 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -274,7 +274,7 @@ void Monitor::Lock(Thread* self) {
}
if (sample_percent != 0 && (static_cast<uint32_t>(rand() % 100) < sample_percent)) {
const char* owners_filename;
- uint32_t owners_line_number;
+ int32_t owners_line_number;
TranslateLocation(owners_method, owners_dex_pc, &owners_filename, &owners_line_number);
if (wait_ms > kLongWaitMs && owners_method != nullptr) {
LOG(WARNING) << "Long monitor contention event with owner method="
@@ -1086,7 +1086,7 @@ bool Monitor::IsLocked() SHARED_REQUIRES(Locks::mutator_lock_) {
}
void Monitor::TranslateLocation(ArtMethod* method, uint32_t dex_pc,
- const char** source_file, uint32_t* line_number) const {
+ const char** source_file, int32_t* line_number) const {
// If method is null, location is unknown
if (method == nullptr) {
*source_file = "";
diff --git a/runtime/monitor.h b/runtime/monitor.h
index 61235efd89..707d0f112c 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -183,7 +183,7 @@ class Monitor {
NO_THREAD_SAFETY_ANALYSIS; // For m->Install(self)
void LogContentionEvent(Thread* self, uint32_t wait_ms, uint32_t sample_percent,
- const char* owner_filename, uint32_t owner_line_number)
+ const char* owner_filename, int32_t owner_line_number)
SHARED_REQUIRES(Locks::mutator_lock_);
static void FailedUnlock(mirror::Object* obj, Thread* expected_owner, Thread* found_owner,
@@ -235,7 +235,7 @@ class Monitor {
// Translates the provided method and pc into its declaring class' source file and line number.
void TranslateLocation(ArtMethod* method, uint32_t pc,
- const char** source_file, uint32_t* line_number) const
+ const char** source_file, int32_t* line_number) const
SHARED_REQUIRES(Locks::mutator_lock_);
uint32_t GetOwnerThreadId() REQUIRES(!monitor_lock_);
diff --git a/runtime/monitor_android.cc b/runtime/monitor_android.cc
index efe2e823d9..82ef2d841a 100644
--- a/runtime/monitor_android.cc
+++ b/runtime/monitor_android.cc
@@ -50,7 +50,7 @@ static char* EventLogWriteString(char* dst, const char* value, size_t len) {
}
void Monitor::LogContentionEvent(Thread* self, uint32_t wait_ms, uint32_t sample_percent,
- const char* owner_filename, uint32_t owner_line_number) {
+ const char* owner_filename, int32_t owner_line_number) {
// Emit the event list length, 1 byte.
char eventBuffer[174];
char* cp = eventBuffer;
@@ -80,7 +80,7 @@ void Monitor::LogContentionEvent(Thread* self, uint32_t wait_ms, uint32_t sample
uint32_t pc;
ArtMethod* m = self->GetCurrentMethod(&pc);
const char* filename;
- uint32_t line_number;
+ int32_t line_number;
TranslateLocation(m, pc, &filename, &line_number);
cp = EventLogWriteString(cp, filename, strlen(filename));
diff --git a/runtime/monitor_linux.cc b/runtime/monitor_linux.cc
index 856ebe45f9..1c77ac0eb3 100644
--- a/runtime/monitor_linux.cc
+++ b/runtime/monitor_linux.cc
@@ -18,7 +18,7 @@
namespace art {
-void Monitor::LogContentionEvent(Thread*, uint32_t, uint32_t, const char*, uint32_t) {
+void Monitor::LogContentionEvent(Thread*, uint32_t, uint32_t, const char*, int32_t) {
}
} // namespace art
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 1a6beadd37..4eea3f39f7 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -74,7 +74,7 @@ static jlongArray ConvertDexFilesToJavaArray(JNIEnv* env,
const OatFile* oat_file,
std::vector<std::unique_ptr<const DexFile>>& vec) {
// Add one for the oat file.
- jlongArray long_array = env->NewLongArray(static_cast<jsize>(1u + vec.size()));
+ jlongArray long_array = env->NewLongArray(static_cast<jsize>(kDexFileIndexStart + vec.size()));
if (env->ExceptionCheck() == JNI_TRUE) {
return nullptr;
}
@@ -230,7 +230,8 @@ static jboolean DexFile_closeDexFile(JNIEnv* env, jclass, jobject cookie) {
}
}
- if (all_deleted) {
+ // oat_file can be null if we are running without dex2oat.
+ if (all_deleted && oat_file != nullptr) {
// If all of the dex files are no longer in use we can unmap the corresponding oat file.
VLOG(class_linker) << "Unregistering " << oat_file;
runtime->GetOatFileManager().UnRegisterAndDeleteOatFile(oat_file);
@@ -263,10 +264,12 @@ static jclass DexFile_defineClassNative(JNIEnv* env,
if (dex_class_def != nullptr) {
ScopedObjectAccess soa(env);
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- class_linker->RegisterDexFile(*dex_file);
StackHandleScope<1> hs(soa.Self());
Handle<mirror::ClassLoader> class_loader(
hs.NewHandle(soa.Decode<mirror::ClassLoader*>(javaLoader)));
+ class_linker->RegisterDexFile(
+ *dex_file,
+ class_linker->GetOrCreateAllocatorForClassLoader(class_loader.Get()));
mirror::Class* result = class_linker->DefineClass(soa.Self(),
descriptor.c_str(),
hash,
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 4f957233c4..4c5dc3ad25 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -497,7 +497,8 @@ static void VMRuntime_preloadDexCaches(JNIEnv* env, jobject) {
const DexFile* dex_file = boot_class_path[i];
CHECK(dex_file != nullptr);
StackHandleScope<1> hs(soa.Self());
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(linker->RegisterDexFile(*dex_file)));
+ Handle<mirror::DexCache> dex_cache(
+ hs.NewHandle(linker->RegisterDexFile(*dex_file, runtime->GetLinearAlloc())));
if (kPreloadDexCachesStrings) {
for (size_t j = 0; j < dex_cache->NumStrings(); j++) {
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index 3371a3955e..9eee156bb0 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -22,7 +22,7 @@
#include "base/logging.h"
#include "base/stl_util.h"
-#include "dex_file.h"
+#include "dex_file-inl.h"
#include "gc/space/image_space.h"
#include "oat_file_assistant.h"
#include "thread-inl.h"
@@ -30,7 +30,9 @@
namespace art {
// For b/21333911.
-static constexpr bool kDuplicateClassesCheck = false;
+// Only enabled for debug builds to prevent bit rot. There are too many performance regressions for
+// normal builds.
+static constexpr bool kDuplicateClassesCheck = kIsDebugBuild;
const OatFile* OatFileManager::RegisterOatFile(std::unique_ptr<const OatFile> oat_file) {
WriterMutexLock mu(Thread::Current(), *Locks::oat_file_manager_lock_);
@@ -115,9 +117,9 @@ class DexFileAndClassPair : ValueObject {
current_class_index_(current_class_index),
from_loaded_oat_(from_loaded_oat) {}
- DexFileAndClassPair(DexFileAndClassPair&& rhs) = default;
+ DexFileAndClassPair(const DexFileAndClassPair& rhs) = default;
- DexFileAndClassPair& operator=(DexFileAndClassPair&& rhs) = default;
+ DexFileAndClassPair& operator=(const DexFileAndClassPair& rhs) = default;
const char* GetCachedDescriptor() const {
return cached_descriptor_;
@@ -139,7 +141,7 @@ class DexFileAndClassPair : ValueObject {
void Next() {
++current_class_index_;
- cached_descriptor_ = nullptr;
+ cached_descriptor_ = GetClassDescriptor(dex_file_.get(), current_class_index_);
}
size_t GetCurrentClassIndex() const {
@@ -162,7 +164,7 @@ class DexFileAndClassPair : ValueObject {
}
const char* cached_descriptor_;
- std::unique_ptr<const DexFile> dex_file_;
+ std::shared_ptr<const DexFile> dex_file_;
size_t current_class_index_;
bool from_loaded_oat_; // We only need to compare mismatches between what we load now
// and what was loaded before. Any old duplicates must have been
@@ -215,8 +217,17 @@ bool OatFileManager::HasCollisions(const OatFile* oat_file,
// Add dex files from already loaded oat files, but skip boot.
const OatFile* boot_oat = GetBootOatFile();
+ // The same OatFile can be loaded multiple times at different addresses. In this case, we don't
+ // need to check both against each other since they would have resolved the same way at compile
+ // time.
+ std::unordered_set<std::string> unique_locations;
for (const std::unique_ptr<const OatFile>& loaded_oat_file : oat_files_) {
- if (loaded_oat_file.get() != boot_oat) {
+ DCHECK_NE(loaded_oat_file.get(), oat_file);
+ const std::string& location = loaded_oat_file->GetLocation();
+ if (loaded_oat_file.get() != boot_oat &&
+ location != oat_file->GetLocation() &&
+ unique_locations.find(location) == unique_locations.end()) {
+ unique_locations.insert(location);
AddDexFilesFromOat(loaded_oat_file.get(), /*already_loaded*/true, &queue);
}
}
@@ -232,12 +243,12 @@ bool OatFileManager::HasCollisions(const OatFile* oat_file,
// Now drain the queue.
while (!queue.empty()) {
// Modifying the top element is only safe if we pop right after.
- DexFileAndClassPair compare_pop(std::move(const_cast<DexFileAndClassPair&>(queue.top())));
+ DexFileAndClassPair compare_pop(queue.top());
queue.pop();
// Compare against the following elements.
while (!queue.empty()) {
- DexFileAndClassPair top(std::move(const_cast<DexFileAndClassPair&>(queue.top())));
+ DexFileAndClassPair top(queue.top());
if (strcmp(compare_pop.GetCachedDescriptor(), top.GetCachedDescriptor()) == 0) {
// Same descriptor. Check whether it's crossing old-oat-files to new-oat-files.
@@ -249,7 +260,6 @@ bool OatFileManager::HasCollisions(const OatFile* oat_file,
top.GetDexFile()->GetLocation().c_str());
return true;
}
- // Pop it.
queue.pop();
AddNext(&top, &queue);
} else {
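The HasCollisions change feeds each already-loaded oat location into the comparison queue only once, since the same file mapped at several addresses would have resolved identically at compile time. The filtering step in isolation, with plain strings standing in for OatFile locations (the paths are made up):

    #include <iostream>
    #include <string>
    #include <unordered_set>
    #include <vector>

    int main() {
      const std::string new_oat  = "/data/app/example/base.odex";   // the file being checked
      const std::string boot_oat = "/system/framework/boot.oat";
      // The same location can show up more than once if an oat file was mapped repeatedly;
      // an already-loaded mapping of the new file's own location is skipped too.
      std::vector<std::string> loaded = {
          boot_oat, "/data/app/other/base.odex", "/data/app/other/base.odex", new_oat};

      std::unordered_set<std::string> unique_locations;
      for (const std::string& location : loaded) {
        if (location != boot_oat &&
            location != new_oat &&
            unique_locations.find(location) == unique_locations.end()) {
          unique_locations.insert(location);
          std::cout << "adding dex files from " << location << "\n";  // once per distinct location
        }
      }
      return 0;
    }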
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index 33c90e3000..02c93cf864 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -391,6 +391,34 @@ void RegisterLine::PopMonitor(MethodVerifier* verifier, uint32_t reg_idx) {
}
}
+// Check whether there is another register in the search map that is locked the same way as the
+// register in the src map. This establishes an alias.
+static bool FindLockAliasedRegister(
+ uint32_t src,
+ const AllocationTrackingSafeMap<uint32_t, uint32_t, kAllocatorTagVerifier>& src_map,
+ const AllocationTrackingSafeMap<uint32_t, uint32_t, kAllocatorTagVerifier>& search_map) {
+ auto it = src_map.find(src);
+ if (it == src_map.end()) {
+ // "Not locked" is trivially aliased.
+ return true;
+ }
+ uint32_t src_lock_levels = it->second;
+ if (src_lock_levels == 0) {
+ // "Not locked" is trivially aliased.
+ return true;
+ }
+
+ // Scan the map for the same value.
+ for (const std::pair<uint32_t, uint32_t>& pair : search_map) {
+ if (pair.first != src && pair.second == src_lock_levels) {
+ return true;
+ }
+ }
+
+ // Nothing found, no alias.
+ return false;
+}
+
bool RegisterLine::MergeRegisters(MethodVerifier* verifier, const RegisterLine* incoming_line) {
bool changed = false;
DCHECK(incoming_line != nullptr);
@@ -417,9 +445,29 @@ bool RegisterLine::MergeRegisters(MethodVerifier* verifier, const RegisterLine*
size_t depths = reg_to_lock_depths_.count(idx);
size_t incoming_depths = incoming_line->reg_to_lock_depths_.count(idx);
if (depths != incoming_depths) {
- if (depths == 0 || incoming_depths == 0) {
- reg_to_lock_depths_.erase(idx);
- } else {
+ // Stack levels aren't matching. This is potentially bad, as we don't do a
+ // flow-sensitive analysis.
+ // However, this could be an alias of something locked in one path, and the alias was
+ // destroyed in another path. It is fine to drop this as long as there's another alias
+ // for the lock around. The last vanishing alias will then report that things would be
+ // left unlocked. We need to check for aliases for both lock levels.
+ //
+          // Example (lock status in curly braces as pairs of register and lock levels):
+ //
+ // lock v1 {v1=1}
+ // | |
+ // v0 = v1 {v0=1, v1=1} v0 = v2 {v1=1}
+ // | |
+ // {v1=1}
+ // // Dropping v0, as the status can't be merged
+          //                    // but the lock info ("locked at depth 1" and
+          //                    // "not locked at all") is available.
+ if (!FindLockAliasedRegister(idx,
+ reg_to_lock_depths_,
+ reg_to_lock_depths_) ||
+ !FindLockAliasedRegister(idx,
+ incoming_line->reg_to_lock_depths_,
+ reg_to_lock_depths_)) {
verifier->Fail(VERIFY_ERROR_LOCKING);
if (kDumpLockFailures) {
LOG(WARNING) << "mismatched stack depths for register v" << idx
@@ -429,20 +477,51 @@ bool RegisterLine::MergeRegisters(MethodVerifier* verifier, const RegisterLine*
}
break;
}
+ // We found aliases, set this to zero.
+ reg_to_lock_depths_.erase(idx);
} else if (depths > 0) {
// Check whether they're actually the same levels.
uint32_t locked_levels = reg_to_lock_depths_.find(idx)->second;
uint32_t incoming_locked_levels = incoming_line->reg_to_lock_depths_.find(idx)->second;
if (locked_levels != incoming_locked_levels) {
- verifier->Fail(VERIFY_ERROR_LOCKING);
- if (kDumpLockFailures) {
- LOG(WARNING) << "mismatched lock levels for register v" << idx << ": "
- << std::hex << locked_levels << std::dec << " != "
- << std::hex << incoming_locked_levels << std::dec << " in "
- << PrettyMethod(verifier->GetMethodReference().dex_method_index,
- *verifier->GetMethodReference().dex_file);
+ // Lock levels aren't matching. This is potentially bad, as we don't do a
+ // flow-sensitive analysis.
+ // However, this could be an alias of something locked in one path, and the alias was
+ // destroyed in another path. It is fine to drop this as long as there's another alias
+ // for the lock around. The last vanishing alias will then report that things would be
+ // left unlocked. We need to check for aliases for both lock levels.
+ //
+            // Example (lock status in curly braces as pairs of register and lock levels):
+ //
+ // lock v1 {v1=1}
+ // lock v2 {v1=1, v2=2}
+ // | |
+ // v0 = v1 {v0=1, v1=1, v2=2} v0 = v2 {v0=2, v1=1, v2=2}
+ // | |
+ // {v1=1, v2=2}
+ // // Dropping v0, as the status can't be
+ // // merged but the lock info ("locked at
+ // // depth 1" and "locked at depth 2") is
+ // // available.
+ if (!FindLockAliasedRegister(idx,
+ reg_to_lock_depths_,
+ reg_to_lock_depths_) ||
+ !FindLockAliasedRegister(idx,
+ incoming_line->reg_to_lock_depths_,
+ reg_to_lock_depths_)) {
+ // No aliases for both current and incoming, we'll lose information.
+ verifier->Fail(VERIFY_ERROR_LOCKING);
+ if (kDumpLockFailures) {
+ LOG(WARNING) << "mismatched lock levels for register v" << idx << ": "
+ << std::hex << locked_levels << std::dec << " != "
+ << std::hex << incoming_locked_levels << std::dec << " in "
+ << PrettyMethod(verifier->GetMethodReference().dex_method_index,
+ *verifier->GetMethodReference().dex_file);
+ }
+ break;
}
- break;
+ // We found aliases, set this to zero.
+ reg_to_lock_depths_.erase(idx);
}
}
}
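FindLockAliasedRegister answers a single question: is the source register either not locked at all, or is there some other register in the search map carrying exactly the same lock levels (an alias of the same lock)? Only when neither line has such an alias does the merge fail verification; otherwise the mismatching register is dropped. A standalone version over std::map, exercised with the v0/v1 example from the comments above (not the verifier's types):

    #include <cstdint>
    #include <iostream>
    #include <map>

    // Register number -> lock levels; a plain std::map stands in for the
    // verifier's AllocationTrackingSafeMap.
    using LockMap = std::map<uint32_t, uint32_t>;

    // True if 'src' is not locked in src_map, or some other register in
    // search_map carries exactly the same lock levels.
    bool FindLockAliasedRegister(uint32_t src, const LockMap& src_map, const LockMap& search_map) {
      auto it = src_map.find(src);
      if (it == src_map.end() || it->second == 0) {
        return true;  // "not locked" is trivially aliased
      }
      for (const auto& entry : search_map) {
        if (entry.first != src && entry.second == it->second) {
          return true;
        }
      }
      return false;
    }

    int main() {
      LockMap current  = {{0, 1}, {1, 1}};   // left path: v0 and v1 alias the same lock
      LockMap incoming = {{1, 1}};           // right path: only v1 holds the lock
      // v0's lock info differs between the two lines, but v1 still aliases the
      // lock, so the merge may drop v0 instead of failing verification.
      std::cout << std::boolalpha
                << FindLockAliasedRegister(0, current, current) << ' '
                << FindLockAliasedRegister(0, incoming, current) << '\n';  // true true
      return 0;
    }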
diff --git a/test/088-monitor-verification/src/TwoPath.java b/test/088-monitor-verification/src/TwoPath.java
index 2542de7f27..bdc15ad82e 100644
--- a/test/088-monitor-verification/src/TwoPath.java
+++ b/test/088-monitor-verification/src/TwoPath.java
@@ -31,6 +31,8 @@ public class TwoPath {
* Conditionally uses one of the synchronized objects.
*/
public static void twoPath(Object obj1, Object obj2, int x) {
+ Main.assertIsManaged();
+
Object localObj;
synchronized (obj1) {
diff --git a/test/131-structural-change/expected.txt b/test/131-structural-change/expected.txt
index cc7713d252..1d19278f1e 100644
--- a/test/131-structural-change/expected.txt
+++ b/test/131-structural-change/expected.txt
@@ -1,2 +1,3 @@
+JNI_OnLoad called
Should really reach here.
Done.
diff --git a/test/131-structural-change/src/Main.java b/test/131-structural-change/src/Main.java
index 6cbbd12387..c7488992df 100644
--- a/test/131-structural-change/src/Main.java
+++ b/test/131-structural-change/src/Main.java
@@ -35,7 +35,7 @@ public class Main {
e.printStackTrace(System.out);
}
- boolean haveOatFile = hasOat();
+ boolean haveOatFile = hasOatFile();
boolean gotError = false;
try {
Class<?> bClass = getClass().getClassLoader().loadClass("B");
@@ -45,10 +45,10 @@ public class Main {
e.printStackTrace(System.out);
}
if (haveOatFile ^ gotError) {
- System.out.println("Did not get expected error.");
+ System.out.println("Did not get expected error. " + haveOatFile + " " + gotError);
}
System.out.println("Done.");
}
- private native static boolean hasOat();
+ private native static boolean hasOatFile();
}
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index ad64b68ee2..e114a2e9f3 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -214,19 +214,24 @@ TEST_ART_TIMING_SENSITIVE_RUN_TESTS := \
055-enum-performance \
133-static-invoke-super
+# Disable timing-sensitive tests on "dist" builds.
+ifdef dist_goal
+ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
+ $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+ $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(ALL_ADDRESS_SIZES))
+endif
+
# Tests that require python3.
TEST_ART_PYTHON3_DEPENDENCY_RUN_TESTS := \
960-default-smali \
961-default-iface-resolution-generated \
964-default-iface-init-generated \
-# disable timing sensitive tests on "dist" builds.
-ifdef dist_goal
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
- $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(ALL_ADDRESS_SIZES))
+# Check if we have python3 to run our tests.
+ifeq ($(wildcard /usr/bin/python3),)
+ $(warning "No python3 found. Disabling tests: $(TEST_ART_PYTHON3_DEPENDENCY_RUN_TESTS)")
- # Currently disable tsts requiring python3.
+ # Currently disable tests requiring python3 when it is not installed.
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
$(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
$(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_PYTHON3_DEPENDENCY_RUN_TESTS), $(ALL_ADDRESS_SIZES))
@@ -324,13 +329,15 @@ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUIL
$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),130-hprof,$(ALL_ADDRESS_SIZES))
# 131 is an old test. The functionality has been implemented at an earlier stage and is checked
-# in tests 138.
-ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
+# in test 138. Blacklisted for debug builds since these builds have duplicate classes checks which
+# punt to the interpreter.
+ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),debug,$(PREBUILD_TYPES), \
$(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES), \
$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),131-structural-change,$(ALL_ADDRESS_SIZES))
-# 138-duplicate-classes-check. Turned off temporarily, b/21333911.
-ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
+# 138-duplicate-classes-check. Turned on for debug builds since debug builds have duplicate classes
+# checks enabled, b/21333911.
+ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),ndebug,$(PREBUILD_TYPES), \
$(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES), \
$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),138-duplicate-classes-check,$(ALL_ADDRESS_SIZES))