Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/inliner.cc                       4
-rw-r--r--  compiler/optimizing/instruction_builder.cc          18
-rw-r--r--  compiler/optimizing/instruction_simplifier.cc       21
-rw-r--r--  compiler/optimizing/nodes.cc                        14
-rw-r--r--  compiler/optimizing/nodes.h                         16
-rw-r--r--  compiler/optimizing/nodes_vector.h                  10
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc          18
-rw-r--r--  compiler/optimizing/reference_type_propagation.cc    6
-rw-r--r--  compiler/optimizing/scheduler.h                      5
-rw-r--r--  compiler/optimizing/scheduler_arm64.h               14
10 files changed, 97 insertions, 29 deletions
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index a175c21760..8750910fe1 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -294,7 +294,7 @@ static dex::TypeIndex FindClassIndexIn(mirror::Class* cls,
// as there may be different class loaders. So only return the index if it's
// the right class already resolved with the class loader.
if (index.IsValid()) {
- ObjPtr<mirror::Class> resolved = ClassLinker::LookupResolvedType(
+ ObjPtr<mirror::Class> resolved = compilation_unit.GetClassLinker()->LookupResolvedType(
index, compilation_unit.GetDexCache().Get(), compilation_unit.GetClassLoader().Get());
if (resolved != cls) {
index = dex::TypeIndex::Invalid();
@@ -682,7 +682,7 @@ HInliner::InlineCacheType HInliner::ExtractClassesFromOfflineProfile(
<< "is invalid in location" << dex_cache->GetDexFile()->GetLocation();
return kInlineCacheNoData;
}
- ObjPtr<mirror::Class> clazz = ClassLinker::LookupResolvedType(
+ ObjPtr<mirror::Class> clazz = caller_compilation_unit_.GetClassLinker()->LookupResolvedType(
class_ref.type_index,
dex_cache,
caller_compilation_unit_.GetClassLoader().Get());
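
All ClassLinker::LookupResolvedType call sites in this commit move from the former static form
to a call on a ClassLinker instance, obtained from the compilation unit where one is at hand
(Runtime::Current()->GetClassLinker() is used in reference_type_propagation.cc below). A sketch
of the migration shape, with placeholder arguments:

    // Old: ClassLinker::LookupResolvedType(type_index, dex_cache, class_loader)
    ObjPtr<mirror::Class> klass = compilation_unit.GetClassLinker()->LookupResolvedType(
        type_index, compilation_unit.GetDexCache().Get(), compilation_unit.GetClassLoader().Get());
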
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index bce4de32d5..e36d91fb05 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -442,17 +442,15 @@ ArenaBitVector* HInstructionBuilder::FindNativeDebugInfoLocations() {
return false;
}
};
- const uint32_t num_instructions = code_item_->insns_size_in_code_units_;
+ CodeItemDebugInfoAccessor accessor(dex_file_, code_item_);
ArenaBitVector* locations = ArenaBitVector::Create(local_allocator_,
- num_instructions,
+ accessor.InsnsSizeInCodeUnits(),
/* expandable */ false,
kArenaAllocGraphBuilder);
locations->ClearAllBits();
- uint32_t debug_info_offset = OatFile::GetDebugInfoOffset(*dex_file_, code_item_);
- dex_file_->DecodeDebugPositionInfo(code_item_, debug_info_offset, Callback::Position, locations);
+ dex_file_->DecodeDebugPositionInfo(accessor.DebugInfoOffset(), Callback::Position, locations);
// Instruction-specific tweaks.
- IterationRange<DexInstructionIterator> instructions = code_item_->Instructions();
- for (const DexInstructionPcPair& inst : instructions) {
+ for (const DexInstructionPcPair& inst : accessor) {
switch (inst->Opcode()) {
case Instruction::MOVE_EXCEPTION: {
// Stop in native debugger after the exception has been moved.
@@ -461,7 +459,7 @@ ArenaBitVector* HInstructionBuilder::FindNativeDebugInfoLocations() {
locations->ClearBit(inst.DexPc());
DexInstructionIterator next = std::next(DexInstructionIterator(inst));
DCHECK(next.DexPc() != inst.DexPc());
- if (next != instructions.end()) {
+ if (next != accessor.end()) {
locations->SetBit(next.DexPc());
}
break;
@@ -796,7 +794,6 @@ ArtMethod* HInstructionBuilder::ResolveMethod(uint16_t method_idx, InvokeType in
ArtMethod* resolved_method =
class_linker->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>(
- *dex_compilation_unit_->GetDexFile(),
method_idx,
dex_compilation_unit_->GetDexCache(),
class_loader,
@@ -831,7 +828,6 @@ ArtMethod* HInstructionBuilder::ResolveMethod(uint16_t method_idx, InvokeType in
return nullptr;
}
ObjPtr<mirror::Class> referenced_class = class_linker->LookupResolvedType(
- *dex_compilation_unit_->GetDexFile(),
dex_compilation_unit_->GetDexFile()->GetMethodId(method_idx).class_idx_,
dex_compilation_unit_->GetDexCache().Get(),
class_loader.Get());
@@ -1425,7 +1421,7 @@ bool HInstructionBuilder::BuildInstanceFieldAccess(const Instruction& instructio
}
static ObjPtr<mirror::Class> GetClassFrom(CompilerDriver* driver,
- const DexCompilationUnit& compilation_unit) {
+ const DexCompilationUnit& compilation_unit) {
ScopedObjectAccess soa(Thread::Current());
Handle<mirror::ClassLoader> class_loader = compilation_unit.GetClassLoader();
Handle<mirror::DexCache> dex_cache = compilation_unit.GetDexCache();
@@ -2934,7 +2930,7 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction,
ObjPtr<mirror::Class> HInstructionBuilder::LookupResolvedType(
dex::TypeIndex type_index,
const DexCompilationUnit& compilation_unit) const {
- return ClassLinker::LookupResolvedType(
+ return compilation_unit.GetClassLinker()->LookupResolvedType(
type_index, compilation_unit.GetDexCache().Get(), compilation_unit.GetClassLoader().Get());
}
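
The FindNativeDebugInfoLocations() hunks above migrate to the CodeItemDebugInfoAccessor API.
A minimal sketch of the new pattern, using only the calls that appear in the hunks:

    CodeItemDebugInfoAccessor accessor(dex_file_, code_item_);
    uint32_t num_code_units = accessor.InsnsSizeInCodeUnits();  // was insns_size_in_code_units_
    uint32_t debug_info_off = accessor.DebugInfoOffset();       // was OatFile::GetDebugInfoOffset()
    for (const DexInstructionPcPair& inst : accessor) {         // was code_item_->Instructions()
      // Per instruction: inst.DexPc() and inst->Opcode(), as used above.
    }
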
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index a42a85dc1d..53e449bbbe 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -994,6 +994,27 @@ void InstructionSimplifierVisitor::VisitIf(HIf* instruction) {
instruction->GetBlock()->SwapSuccessors();
RecordSimplification();
}
+ HInstruction* input = instruction->InputAt(0);
+
+ // If a condition 'cond' is evaluated by an HIf instruction, then in the successors of the
+ // HIF_BLOCK the value of the condition is statically known (TRUE in TRUE_SUCC, FALSE in
+ // FALSE_SUCC). Using that, another evaluation (use) EVAL of the same 'cond' can be replaced
+ // with the TRUE value (FALSE value) if every path from the ENTRY_BLOCK to EVAL_BLOCK contains
+ // the edge HIF_BLOCK->TRUE_SUCC (HIF_BLOCK->FALSE_SUCC).
+ if (!input->IsConstant()) {
+ HBasicBlock* true_succ = instruction->IfTrueSuccessor();
+ HBasicBlock* false_succ = instruction->IfFalseSuccessor();
+
+ DCHECK_EQ(true_succ->GetPredecessors().size(), 1u);
+ input->ReplaceUsesDominatedBy(
+ true_succ->GetFirstInstruction(), GetGraph()->GetIntConstant(1), /* strictly */ false);
+ RecordSimplification();
+
+ DCHECK_EQ(false_succ->GetPredecessors().size(), 1u);
+ input->ReplaceUsesDominatedBy(
+ false_succ->GetFirstInstruction(), GetGraph()->GetIntConstant(0), /* strictly */ false);
+ RecordSimplification();
+ }
}
void InstructionSimplifierVisitor::VisitArrayLength(HArrayLength* instruction) {
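
To make the new VisitIf rewrite concrete, here is an illustrative source-level example
(hypothetical code, not from the patch). Both re-evaluations of 'cond' are dominated by exactly
one outgoing edge of the HIf, so each use can be replaced with the matching constant:

    bool f(bool cond) {
      if (cond) {
        return cond;  // reachable only via the true edge  -> use replaced by IntConstant(1)
      }
      return cond;    // reachable only via the false edge -> use replaced by IntConstant(0)
    }

After the simplification the function is equivalent to 'if (cond) { return true; } return false;',
which later passes may clean up further.
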
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 5f33ed6303..d39c2aded5 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -1111,10 +1111,10 @@ bool HInstructionList::FoundBefore(const HInstruction* instruction1,
return true;
}
-bool HInstruction::StrictlyDominates(HInstruction* other_instruction) const {
+bool HInstruction::Dominates(HInstruction* other_instruction, bool strictly) const {
if (other_instruction == this) {
// An instruction does not strictly dominate itself.
- return false;
+ return !strictly;
}
HBasicBlock* block = GetBlock();
HBasicBlock* other_block = other_instruction->GetBlock();
@@ -1148,6 +1148,10 @@ bool HInstruction::StrictlyDominates(HInstruction* other_instruction) const {
}
}
+bool HInstruction::StrictlyDominates(HInstruction* other_instruction) const {
+ return Dominates(other_instruction, /* strictly */ true);
+}
+
void HInstruction::RemoveEnvironment() {
RemoveEnvironmentUses(this);
environment_ = nullptr;
@@ -1170,14 +1174,16 @@ void HInstruction::ReplaceWith(HInstruction* other) {
DCHECK(env_uses_.empty());
}
-void HInstruction::ReplaceUsesDominatedBy(HInstruction* dominator, HInstruction* replacement) {
+void HInstruction::ReplaceUsesDominatedBy(HInstruction* dominator,
+ HInstruction* replacement,
+ bool strictly) {
const HUseList<HInstruction*>& uses = GetUses();
for (auto it = uses.begin(), end = uses.end(); it != end; /* ++it below */) {
HInstruction* user = it->GetUser();
size_t index = it->GetIndex();
// Increment `it` now because `*it` may disappear thanks to user->ReplaceInput().
++it;
- if (dominator->StrictlyDominates(user)) {
+ if (dominator->Dominates(user, strictly)) {
user->ReplaceInput(replacement, index);
}
}
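
The semantics of the new non-strict mode, as a minimal sketch (assumes 'a' and 'b' are
HInstruction* with 'a' dominating 'b', and 'block'/'replacement' are placeholders in scope):

    a->Dominates(a, /* strictly */ true);    // false: nothing strictly dominates itself
    a->Dominates(a, /* strictly */ false);   // true: regular dominance is reflexive
    a->StrictlyDominates(b);                 // same as a->Dominates(b, /* strictly */ true)

    // The non-strict form lets ReplaceUsesDominatedBy() anchor at a block's first
    // instruction and still rewrite a use inside that very instruction, which is
    // exactly how the instruction_simplifier.cc hunk above drives it:
    a->ReplaceUsesDominatedBy(block->GetFirstInstruction(), replacement, /* strictly */ false);
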
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 42a9d95b9a..affd54ed72 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -2098,9 +2098,13 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
return IsRemovable() && !HasUses();
}
- // Does this instruction strictly dominate `other_instruction`?
- // Returns false if this instruction and `other_instruction` are the same.
+ // Does this instruction dominate `other_instruction` (strictly if 'strictly' is true,
+ // in the regular sense otherwise)?
+ // Returns '!strictly' if this instruction and `other_instruction` are the same.
// Aborts if this instruction and `other_instruction` are both phis.
+ bool Dominates(HInstruction* other_instruction, bool strictly) const;
+
+ // Returns 'Dominates(other_instruction, /* strictly */ true)'.
bool StrictlyDominates(HInstruction* other_instruction) const;
int GetId() const { return id_; }
@@ -2161,7 +2165,13 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
void SetLocations(LocationSummary* locations) { locations_ = locations; }
void ReplaceWith(HInstruction* instruction);
- void ReplaceUsesDominatedBy(HInstruction* dominator, HInstruction* replacement);
+
+ // Replaces all uses of this instruction that are dominated by 'dominator' with 'replacement'.
+ // 'strictly' selects whether strict or regular dominance is checked.
+ void ReplaceUsesDominatedBy(HInstruction* dominator,
+ HInstruction* replacement,
+ bool strictly = true);
+
void ReplaceInput(HInstruction* replacement, size_t index);
// This is almost the same as doing `ReplaceWith()`. But in this helper, the
diff --git a/compiler/optimizing/nodes_vector.h b/compiler/optimizing/nodes_vector.h
index 096349fd73..87dff8403b 100644
--- a/compiler/optimizing/nodes_vector.h
+++ b/compiler/optimizing/nodes_vector.h
@@ -109,6 +109,16 @@ class HVecOperation : public HVariableInputSizeInstruction {
// Assumes vector nodes cannot be moved by default. Each concrete implementation
// that can be moved should override this method and return true.
+ //
+ // Note: a similar approach is used for instruction scheduling (when it is enabled for the
+ // target): by default HScheduler::IsSchedulable returns false for a particular HVecOperation.
+ // HScheduler${ARCH}::IsSchedulable can be overridden to return true for an instruction (see
+ // scheduler_arm64.h for an example) if it is safe to schedule; in that case one *must* also
+ // review/update HScheduler${ARCH}::IsSchedulingBarrier for this instruction.
+ //
+ // Note: for newly introduced vector instructions, HScheduler${ARCH}::IsSchedulingBarrier must
+ // be altered to return true if the instruction might reside outside the SIMD loop body, since
+ // SIMD registers are not kept alive across vector loop boundaries (yet).
bool CanBeMoved() const OVERRIDE { return false; }
// Tests if all data of a vector node (vector length and packed type) is equal.
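
The pattern this note prescribes, as a sketch: a concrete vector node that is safe to move
overrides the default above (the class name HVecFoo is hypothetical), and the per-arch
scheduler hooks are then reviewed in tandem (sketched after the scheduler.h hunk below):

    // In the hypothetical HVecFoo node class:
    bool CanBeMoved() const OVERRIDE { return true; }  // base HVecOperation returns false
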
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 73c72fc57a..24b1a123ee 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -1224,7 +1224,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
}
const CompilerOptions& compiler_options = GetCompilerDriver()->GetCompilerOptions();
- if (compiler_options.GetGenerateDebugInfo()) {
+ if (compiler_options.GenerateAnyDebugInfo()) {
const auto* method_header = reinterpret_cast<const OatQuickMethodHeader*>(code);
const uintptr_t code_address = reinterpret_cast<uintptr_t>(method_header->GetCode());
debug::MethodDebugInfo info = {};
@@ -1244,10 +1244,13 @@ bool OptimizingCompiler::JitCompile(Thread* self,
info.frame_size_in_bytes = method_header->GetFrameSizeInBytes();
info.code_info = nullptr;
info.cfi = jni_compiled_method.GetCfi();
- std::vector<uint8_t> elf_file = debug::WriteDebugElfFileForMethods(
+ // If both flags are passed, generate full debug info.
+ const bool mini_debug_info = !compiler_options.GetGenerateDebugInfo();
+ std::vector<uint8_t> elf_file = debug::MakeElfFileForJIT(
GetCompilerDriver()->GetInstructionSet(),
GetCompilerDriver()->GetInstructionSetFeatures(),
- ArrayRef<const debug::MethodDebugInfo>(&info, 1));
+ mini_debug_info,
+ info);
CreateJITCodeEntryForAddress(code_address, std::move(elf_file));
}
@@ -1352,7 +1355,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
}
const CompilerOptions& compiler_options = GetCompilerDriver()->GetCompilerOptions();
- if (compiler_options.GetGenerateDebugInfo()) {
+ if (compiler_options.GenerateAnyDebugInfo()) {
const auto* method_header = reinterpret_cast<const OatQuickMethodHeader*>(code);
const uintptr_t code_address = reinterpret_cast<uintptr_t>(method_header->GetCode());
debug::MethodDebugInfo info = {};
@@ -1372,10 +1375,13 @@ bool OptimizingCompiler::JitCompile(Thread* self,
info.frame_size_in_bytes = method_header->GetFrameSizeInBytes();
info.code_info = stack_map_size == 0 ? nullptr : stack_map_data;
info.cfi = ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data());
- std::vector<uint8_t> elf_file = debug::WriteDebugElfFileForMethods(
+ // If both flags are passed, generate full debug info.
+ const bool mini_debug_info = !compiler_options.GetGenerateDebugInfo();
+ std::vector<uint8_t> elf_file = debug::MakeElfFileForJIT(
GetCompilerDriver()->GetInstructionSet(),
GetCompilerDriver()->GetInstructionSetFeatures(),
- ArrayRef<const debug::MethodDebugInfo>(&info, 1));
+ mini_debug_info,
+ info);
CreateJITCodeEntryForAddress(code_address, std::move(elf_file));
}
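
The flag interplay in both JitCompile hunks, spelled out (grounded in the code above; the
dex2oat option names are shown only for orientation):

    const bool any  = compiler_options.GenerateAnyDebugInfo();   // gates the whole block
    const bool mini = !compiler_options.GetGenerateDebugInfo();  // full debug info wins
    // --generate-debug-info only       -> any=true,  mini=false  (full ELF debug info)
    // --generate-mini-debug-info only  -> any=true,  mini=true   (compressed mini info)
    // both flags                       -> any=true,  mini=false  ("If both flags are passed...")
    // neither                          -> any=false              (no JIT ELF file is created)
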
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index d84f14acc0..8bb124e066 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -544,7 +544,7 @@ void ReferenceTypePropagation::RTPVisitor::SetClassAsTypeInfo(HInstruction* inst
// the method is from the String class, the null loader is good enough.
Handle<mirror::ClassLoader> loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
ArtMethod* method = cl->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>(
- dex_file, invoke->GetDexMethodIndex(), dex_cache, loader, nullptr, kDirect);
+ invoke->GetDexMethodIndex(), dex_cache, loader, /* referrer */ nullptr, kDirect);
DCHECK(method != nullptr);
mirror::Class* declaring_class = method->GetDeclaringClass();
DCHECK(declaring_class != nullptr);
@@ -576,8 +576,8 @@ void ReferenceTypePropagation::RTPVisitor::UpdateReferenceTypeInfo(HInstruction*
ScopedObjectAccess soa(Thread::Current());
ObjPtr<mirror::DexCache> dex_cache = FindDexCacheWithHint(soa.Self(), dex_file, hint_dex_cache_);
- ObjPtr<mirror::Class> klass =
- ClassLinker::LookupResolvedType(type_idx, dex_cache, class_loader_.Get());
+ ObjPtr<mirror::Class> klass = Runtime::Current()->GetClassLinker()->LookupResolvedType(
+ type_idx, dex_cache, class_loader_.Get());
SetClassAsTypeInfo(instr, klass, is_exact);
}
diff --git a/compiler/optimizing/scheduler.h b/compiler/optimizing/scheduler.h
index bb7c353bc2..dfa077f7de 100644
--- a/compiler/optimizing/scheduler.h
+++ b/compiler/optimizing/scheduler.h
@@ -462,6 +462,11 @@ class HScheduler {
// containing basic block from being scheduled.
// This method is used to restrict scheduling to instructions that we know are
// safe to handle.
+ //
+ // By default, HScheduler::IsSchedulable returns false for newly introduced instructions.
+ // HScheduler${ARCH}::IsSchedulable can be overridden to return true for an instruction (see
+ // scheduler_arm64.h for an example) if it is safe to schedule; in that case one *must* also
+ // review/update HScheduler${ARCH}::IsSchedulingBarrier for this instruction.
virtual bool IsSchedulable(const HInstruction* instruction) const;
bool IsSchedulable(const HBasicBlock* block) const;
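
A sketch of the per-arch pairing that this note and the nodes_vector.h note call for, using the
same hypothetical HVecFoo node (IsVecFoo() stands in for the generated kind predicate; both
signatures match the declarations in this file):

    // In scheduler_${ARCH}.h:
    bool IsSchedulable(const HInstruction* instruction) const OVERRIDE {
      // Opt the new node in once it is known to be safe to schedule...
      return instruction->IsVecFoo() || HScheduler::IsSchedulable(instruction);
    }
    bool IsSchedulingBarrier(const HInstruction* instruction) const OVERRIDE {
      // ...and decide in tandem whether it must pin surrounding instructions,
      // e.g. because its live range may cross the SIMD loop boundary.
      return HScheduler::IsSchedulingBarrier(instruction) || instruction->IsVecFoo();
    }
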
diff --git a/compiler/optimizing/scheduler_arm64.h b/compiler/optimizing/scheduler_arm64.h
index 32f161f26a..f71cb5b784 100644
--- a/compiler/optimizing/scheduler_arm64.h
+++ b/compiler/optimizing/scheduler_arm64.h
@@ -151,6 +151,20 @@ class HSchedulerARM64 : public HScheduler {
#undef CASE_INSTRUCTION_KIND
}
+ // Treat as scheduling barriers those vector instructions whose live ranges exceed the
+ // vectorized loop boundaries. This works around the compiler's lack of a notion of SIMD
+ // registers: around a call all live SIMD&FP registers must be saved/restored (only their
+ // lower 64 bits are callee-saved), so such vector instructions must not be reordered.
+ //
+ // TODO: remove this when a proper support of SIMD registers is introduced to the compiler.
+ bool IsSchedulingBarrier(const HInstruction* instr) const OVERRIDE {
+ return HScheduler::IsSchedulingBarrier(instr) ||
+ instr->IsVecReduce() ||
+ instr->IsVecExtractScalar() ||
+ instr->IsVecSetScalars() ||
+ instr->IsVecReplicateScalar();
+ }
+
private:
SchedulingLatencyVisitorARM64 arm64_latency_visitor_;
DISALLOW_COPY_AND_ASSIGN(HSchedulerARM64);