Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/builder.h                           |  15
-rw-r--r--  compiler/optimizing/code_generator_arm.cc               |  91
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc          |  86
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.h           |   8
-rw-r--r--  compiler/optimizing/dex_cache_array_fixups_arm.cc       |  16
-rw-r--r--  compiler/optimizing/inliner.cc                          | 345
-rw-r--r--  compiler/optimizing/inliner.h                           |  49
-rw-r--r--  compiler/optimizing/instruction_builder.cc              |  47
-rw-r--r--  compiler/optimizing/instruction_builder.h               |  11
-rw-r--r--  compiler/optimizing/optimizing_cfi_test.cc              |  26
-rw-r--r--  compiler/optimizing/optimizing_cfi_test_expected.inc    |  16
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc              |  26
-rw-r--r--  compiler/optimizing/reference_type_propagation.cc       |  44
-rw-r--r--  compiler/optimizing/reference_type_propagation.h        |   3
-rw-r--r--  compiler/optimizing/reference_type_propagation_test.cc  |   1
-rw-r--r--  compiler/optimizing/ssa_builder.cc                      |   6
-rw-r--r--  compiler/optimizing/ssa_builder.h                       |   3
17 files changed, 535 insertions(+), 258 deletions(-)
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 3a4c9dbd16..e4ad4222fb 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -54,10 +54,7 @@ class HGraphBuilder : public ValueObject {
compiler_driver_(driver),
compilation_stats_(compiler_stats),
block_builder_(graph, dex_file, code_item),
- ssa_builder_(graph,
- dex_compilation_unit->GetClassLoader(),
- dex_compilation_unit->GetDexCache(),
- handles),
+ ssa_builder_(graph, dex_compilation_unit->GetDexCache(), handles),
instruction_builder_(graph,
&block_builder_,
&ssa_builder_,
@@ -83,12 +80,10 @@ class HGraphBuilder : public ValueObject {
code_item_(code_item),
dex_compilation_unit_(nullptr),
compiler_driver_(nullptr),
+ null_dex_cache_(),
compilation_stats_(nullptr),
block_builder_(graph, nullptr, code_item),
- ssa_builder_(graph,
- handles->NewHandle<mirror::ClassLoader>(nullptr),
- handles->NewHandle<mirror::DexCache>(nullptr),
- handles),
+ ssa_builder_(graph, null_dex_cache_, handles),
instruction_builder_(graph,
&block_builder_,
&ssa_builder_,
@@ -101,7 +96,7 @@ class HGraphBuilder : public ValueObject {
/* code_generator */ nullptr,
/* interpreter_metadata */ nullptr,
/* compiler_stats */ nullptr,
- handles->NewHandle<mirror::DexCache>(nullptr),
+ null_dex_cache_,
handles) {}
GraphAnalysisResult BuildGraph();
@@ -122,6 +117,8 @@ class HGraphBuilder : public ValueObject {
CompilerDriver* const compiler_driver_;
+ ScopedNullHandle<mirror::DexCache> null_dex_cache_;
+
OptimizingCompilerStats* compilation_stats_;
HBasicBlockBuilder block_builder_;
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index bab626f5ae..e34f116b75 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -5304,18 +5304,29 @@ bool LocationsBuilderARM::CanEncodeConstantAsImmediate(uint32_t value,
return true;
}
Opcode neg_opcode = kNoOperand;
+ uint32_t neg_value = 0;
switch (opcode) {
- case AND: neg_opcode = BIC; value = ~value; break;
- case ORR: neg_opcode = ORN; value = ~value; break;
- case ADD: neg_opcode = SUB; value = -value; break;
- case ADC: neg_opcode = SBC; value = ~value; break;
- case SUB: neg_opcode = ADD; value = -value; break;
- case SBC: neg_opcode = ADC; value = ~value; break;
- case MOV: neg_opcode = MVN; value = ~value; break;
+ case AND: neg_opcode = BIC; neg_value = ~value; break;
+ case ORR: neg_opcode = ORN; neg_value = ~value; break;
+ case ADD: neg_opcode = SUB; neg_value = -value; break;
+ case ADC: neg_opcode = SBC; neg_value = ~value; break;
+ case SUB: neg_opcode = ADD; neg_value = -value; break;
+ case SBC: neg_opcode = ADC; neg_value = ~value; break;
+ case MOV: neg_opcode = MVN; neg_value = ~value; break;
default:
return false;
}
- return assembler->ShifterOperandCanHold(kNoRegister, kNoRegister, neg_opcode, value, set_cc, &so);
+
+ if (assembler->ShifterOperandCanHold(kNoRegister,
+ kNoRegister,
+ neg_opcode,
+ neg_value,
+ set_cc,
+ &so)) {
+ return true;
+ }
+
+ return opcode == AND && IsPowerOfTwo(value + 1);
}
void InstructionCodeGeneratorARM::HandleFieldGet(HInstruction* instruction,
@@ -6217,21 +6228,59 @@ void LocationsBuilderARM::VisitBoundsCheck(HBoundsCheck* instruction) {
caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction, caller_saves);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
+
+ HInstruction* index = instruction->InputAt(0);
+ HInstruction* length = instruction->InputAt(1);
+  // If both index and length are constants, we can check the bounds statically. But if at
+  // least one of them is not encodable, ArmEncodableConstantOrRegister would create a
+  // Location::RequiresRegister(), which we do not want here. Instead we create constant
+  // locations for both inputs.
+ bool both_const = index->IsConstant() && length->IsConstant();
+ locations->SetInAt(0, both_const
+ ? Location::ConstantLocation(index->AsConstant())
+ : ArmEncodableConstantOrRegister(index, CMP));
+ locations->SetInAt(1, both_const
+ ? Location::ConstantLocation(length->AsConstant())
+ : ArmEncodableConstantOrRegister(length, CMP));
}
void InstructionCodeGeneratorARM::VisitBoundsCheck(HBoundsCheck* instruction) {
LocationSummary* locations = instruction->GetLocations();
- SlowPathCodeARM* slow_path =
- new (GetGraph()->GetArena()) BoundsCheckSlowPathARM(instruction);
- codegen_->AddSlowPath(slow_path);
-
- Register index = locations->InAt(0).AsRegister<Register>();
- Register length = locations->InAt(1).AsRegister<Register>();
+ Location index_loc = locations->InAt(0);
+ Location length_loc = locations->InAt(1);
+
+ if (length_loc.IsConstant()) {
+ int32_t length = helpers::Int32ConstantFrom(length_loc);
+ if (index_loc.IsConstant()) {
+ // BCE will remove the bounds check if we are guaranteed to pass.
+ int32_t index = helpers::Int32ConstantFrom(index_loc);
+ if (index < 0 || index >= length) {
+ SlowPathCodeARM* slow_path =
+ new (GetGraph()->GetArena()) BoundsCheckSlowPathARM(instruction);
+ codegen_->AddSlowPath(slow_path);
+ __ b(slow_path->GetEntryLabel());
+ } else {
+ // Some optimization after BCE may have generated this, and we should not
+ // generate a bounds check if it is a valid range.
+ }
+ return;
+ }
- __ cmp(index, ShifterOperand(length));
- __ b(slow_path->GetEntryLabel(), HS);
+ SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathARM(instruction);
+ __ cmp(index_loc.AsRegister<Register>(), ShifterOperand(length));
+ codegen_->AddSlowPath(slow_path);
+ __ b(slow_path->GetEntryLabel(), HS);
+ } else {
+ SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathARM(instruction);
+ if (index_loc.IsConstant()) {
+ int32_t index = helpers::Int32ConstantFrom(index_loc);
+ __ cmp(length_loc.AsRegister<Register>(), ShifterOperand(index));
+ } else {
+ __ cmp(length_loc.AsRegister<Register>(), ShifterOperand(index_loc.AsRegister<Register>()));
+ }
+ codegen_->AddSlowPath(slow_path);
+ __ b(slow_path->GetEntryLabel(), LS);
+ }
}
void CodeGeneratorARM::MarkGCCard(Register temp,
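For reference, a minimal standalone sketch (plain C++, not part of the patch; the enum and
helper names are illustrative) of the case analysis the new VisitBoundsCheck performs:

#include <cstdint>
#include <cstdio>

enum class BoundsCheckCode {
  kNone,                    // both constants, index in range: emit nothing
  kAlwaysThrow,             // both constants, index out of range: unconditional branch to slow path
  kCmpIndexLengthBranchHS,  // cmp index, #length; b.hs slow_path
  kCmpLengthIndexBranchLS   // cmp length, index (or #index); b.ls slow_path
};

BoundsCheckCode SelectBoundsCheckCode(bool index_is_const, int32_t index,
                                      bool length_is_const, int32_t length) {
  if (index_is_const && length_is_const) {
    // Both operands known: decide statically. BCE normally removes checks that are
    // guaranteed to pass, so emitting nothing in the in-range case is safe.
    return (index < 0 || index >= length) ? BoundsCheckCode::kAlwaysThrow
                                          : BoundsCheckCode::kNone;
  }
  if (length_is_const) {
    // Unsigned compare: a negative index also compares "higher or same" than the length.
    return BoundsCheckCode::kCmpIndexLengthBranchHS;
  }
  // Length lives in a register: compare it against the (possibly constant) index and
  // branch when length <= index, i.e. the LS condition with the operands swapped.
  return BoundsCheckCode::kCmpLengthIndexBranchLS;
}

int main() {
  std::printf("%d\n", static_cast<int>(SelectBoundsCheckCode(true, 3, true, 10)));   // kNone
  std::printf("%d\n", static_cast<int>(SelectBoundsCheckCode(false, 0, true, 10)));  // HS form
  std::printf("%d\n", static_cast<int>(SelectBoundsCheckCode(true, -1, false, 0)));  // LS form
  return 0;
}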
@@ -7571,9 +7620,11 @@ void InstructionCodeGeneratorARM::GenerateAndConst(Register out, Register first,
ShifterOperand so;
if (__ ShifterOperandCanHold(kNoRegister, kNoRegister, AND, value, &so)) {
__ and_(out, first, so);
- } else {
- DCHECK(__ ShifterOperandCanHold(kNoRegister, kNoRegister, BIC, ~value, &so));
+ } else if (__ ShifterOperandCanHold(kNoRegister, kNoRegister, BIC, ~value, &so)) {
__ bic(out, first, ShifterOperand(~value));
+ } else {
+ DCHECK(IsPowerOfTwo(value + 1));
+ __ ubfx(out, first, 0, WhichPowerOf2(value + 1));
}
}
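The new ubfx fallback in GenerateAndConst relies on the fact that a mask of the form
2^n - 1 keeps exactly the low n bits. A standalone sketch (plain C++, not ART code;
IsPowerOfTwo/WhichPowerOf2 model the ART helpers and UbfxLow models the UBFX instruction):

#include <cassert>
#include <cstdint>

static bool IsPowerOfTwo(uint32_t x) { return x != 0 && (x & (x - 1)) == 0; }

static int WhichPowerOf2(uint32_t x) {  // x must be a power of two
  int n = 0;
  while ((x >>= 1) != 0) ++n;
  return n;
}

// Software model of "UBFX dst, src, #0, #width": extract the low `width` bits.
static uint32_t UbfxLow(uint32_t src, int width) {
  return (width == 32) ? src : (src & ((1u << width) - 1));
}

int main() {
  // 0xFFFF is typically not encodable as a Thumb-2 modified immediate, and neither is
  // ~0xFFFF for BIC, so this is the kind of constant the new fallback targets.
  uint32_t value = 0x0000FFFFu;
  assert(IsPowerOfTwo(value + 1));               // value + 1 == 0x10000
  int n = WhichPowerOf2(value + 1);              // n == 16
  uint32_t first = 0xDEADBEEFu;
  assert((first & value) == UbfxLow(first, n));  // AND by the mask == low-bit extract
  return 0;
}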
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index a1f30cd2b2..f5ada5224b 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -5315,18 +5315,24 @@ bool LocationsBuilderARMVIXL::CanEncodeConstantAsImmediate(uint32_t value,
return true;
}
Opcode neg_opcode = kNoOperand;
+ uint32_t neg_value = 0;
switch (opcode) {
- case AND: neg_opcode = BIC; value = ~value; break;
- case ORR: neg_opcode = ORN; value = ~value; break;
- case ADD: neg_opcode = SUB; value = -value; break;
- case ADC: neg_opcode = SBC; value = ~value; break;
- case SUB: neg_opcode = ADD; value = -value; break;
- case SBC: neg_opcode = ADC; value = ~value; break;
- case MOV: neg_opcode = MVN; value = ~value; break;
+ case AND: neg_opcode = BIC; neg_value = ~value; break;
+ case ORR: neg_opcode = ORN; neg_value = ~value; break;
+ case ADD: neg_opcode = SUB; neg_value = -value; break;
+ case ADC: neg_opcode = SBC; neg_value = ~value; break;
+ case SUB: neg_opcode = ADD; neg_value = -value; break;
+ case SBC: neg_opcode = ADC; neg_value = ~value; break;
+ case MOV: neg_opcode = MVN; neg_value = ~value; break;
default:
return false;
}
- return assembler->ShifterOperandCanHold(neg_opcode, value, set_cc);
+
+ if (assembler->ShifterOperandCanHold(neg_opcode, neg_value, set_cc)) {
+ return true;
+ }
+
+ return opcode == AND && IsPowerOfTwo(value + 1);
}
void InstructionCodeGeneratorARMVIXL::HandleFieldGet(HInstruction* instruction,
@@ -6264,20 +6270,56 @@ void LocationsBuilderARMVIXL::VisitBoundsCheck(HBoundsCheck* instruction) {
caller_saves.Add(LocationFrom(calling_convention.GetRegisterAt(0)));
caller_saves.Add(LocationFrom(calling_convention.GetRegisterAt(1)));
LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction, caller_saves);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
+
+ HInstruction* index = instruction->InputAt(0);
+ HInstruction* length = instruction->InputAt(1);
+  // If both index and length are constants, we can check the bounds statically. But if at
+  // least one of them is not encodable, ArmEncodableConstantOrRegister would create a
+  // Location::RequiresRegister(), which we do not want here. Instead we create constant
+  // locations for both inputs.
+ bool both_const = index->IsConstant() && length->IsConstant();
+ locations->SetInAt(0, both_const
+ ? Location::ConstantLocation(index->AsConstant())
+ : ArmEncodableConstantOrRegister(index, CMP));
+ locations->SetInAt(1, both_const
+ ? Location::ConstantLocation(length->AsConstant())
+ : ArmEncodableConstantOrRegister(length, CMP));
}
void InstructionCodeGeneratorARMVIXL::VisitBoundsCheck(HBoundsCheck* instruction) {
- SlowPathCodeARMVIXL* slow_path =
- new (GetGraph()->GetArena()) BoundsCheckSlowPathARMVIXL(instruction);
- codegen_->AddSlowPath(slow_path);
-
- vixl32::Register index = InputRegisterAt(instruction, 0);
- vixl32::Register length = InputRegisterAt(instruction, 1);
+ LocationSummary* locations = instruction->GetLocations();
+ Location index_loc = locations->InAt(0);
+ Location length_loc = locations->InAt(1);
+
+ if (length_loc.IsConstant()) {
+ int32_t length = Int32ConstantFrom(length_loc);
+ if (index_loc.IsConstant()) {
+ // BCE will remove the bounds check if we are guaranteed to pass.
+ int32_t index = Int32ConstantFrom(index_loc);
+ if (index < 0 || index >= length) {
+ SlowPathCodeARMVIXL* slow_path =
+ new (GetGraph()->GetArena()) BoundsCheckSlowPathARMVIXL(instruction);
+ codegen_->AddSlowPath(slow_path);
+ __ B(slow_path->GetEntryLabel());
+ } else {
+ // Some optimization after BCE may have generated this, and we should not
+ // generate a bounds check if it is a valid range.
+ }
+ return;
+ }
- __ Cmp(index, length);
- __ B(hs, slow_path->GetEntryLabel());
+ SlowPathCodeARMVIXL* slow_path =
+ new (GetGraph()->GetArena()) BoundsCheckSlowPathARMVIXL(instruction);
+ __ Cmp(RegisterFrom(index_loc), length);
+ codegen_->AddSlowPath(slow_path);
+ __ B(hs, slow_path->GetEntryLabel());
+ } else {
+ SlowPathCodeARMVIXL* slow_path =
+ new (GetGraph()->GetArena()) BoundsCheckSlowPathARMVIXL(instruction);
+ __ Cmp(RegisterFrom(length_loc), InputOperandAt(instruction, 0));
+ codegen_->AddSlowPath(slow_path);
+ __ B(ls, slow_path->GetEntryLabel());
+ }
}
void CodeGeneratorARMVIXL::MarkGCCard(vixl32::Register temp,
@@ -7631,10 +7673,12 @@ void InstructionCodeGeneratorARMVIXL::GenerateAndConst(vixl32::Register out,
return;
}
if (GetAssembler()->ShifterOperandCanHold(AND, value)) {
- __ And(out, first, value);
+ __ And(out, first, value);
+ } else if (GetAssembler()->ShifterOperandCanHold(BIC, ~value)) {
+ __ Bic(out, first, ~value);
} else {
- DCHECK(GetAssembler()->ShifterOperandCanHold(BIC, ~value));
- __ Bic(out, first, ~value);
+ DCHECK(IsPowerOfTwo(value + 1));
+ __ Ubfx(out, first, 0, WhichPowerOf2(value + 1));
}
}
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index 781027ab30..ef01a478f4 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -35,11 +35,11 @@
#include "aarch32/macro-assembler-aarch32.h"
#pragma GCC diagnostic pop
-// Default to use the VIXL-based backend on ARM.
-#ifdef ART_USE_OLD_ARM_BACKEND
-static constexpr bool kArmUseVIXL32 = false;
-#else
+// True if VIXL32 should be used for codegen on ARM.
+#ifdef ART_USE_VIXL_ARM_BACKEND
static constexpr bool kArmUseVIXL32 = true;
+#else
+static constexpr bool kArmUseVIXL32 = false;
#endif
namespace art {
diff --git a/compiler/optimizing/dex_cache_array_fixups_arm.cc b/compiler/optimizing/dex_cache_array_fixups_arm.cc
index 0c832a5c35..cfcb276a98 100644
--- a/compiler/optimizing/dex_cache_array_fixups_arm.cc
+++ b/compiler/optimizing/dex_cache_array_fixups_arm.cc
@@ -17,23 +17,23 @@
#include "dex_cache_array_fixups_arm.h"
#include "base/arena_containers.h"
-#ifdef ART_USE_OLD_ARM_BACKEND
-#include "code_generator_arm.h"
-#include "intrinsics_arm.h"
-#else
+#ifdef ART_USE_VIXL_ARM_BACKEND
#include "code_generator_arm_vixl.h"
#include "intrinsics_arm_vixl.h"
+#else
+#include "code_generator_arm.h"
+#include "intrinsics_arm.h"
#endif
#include "utils/dex_cache_arrays_layout-inl.h"
namespace art {
namespace arm {
-#ifdef ART_USE_OLD_ARM_BACKEND
-typedef CodeGeneratorARM CodeGeneratorARMType;
-typedef IntrinsicLocationsBuilderARM IntrinsicLocationsBuilderARMType;
-#else
+#ifdef ART_USE_VIXL_ARM_BACKEND
typedef CodeGeneratorARMVIXL CodeGeneratorARMType;
typedef IntrinsicLocationsBuilderARMVIXL IntrinsicLocationsBuilderARMType;
+#else
+typedef CodeGeneratorARM CodeGeneratorARMType;
+typedef IntrinsicLocationsBuilderARM IntrinsicLocationsBuilderARMType;
#endif
/**
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 0b96005a17..2e45149b43 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -192,9 +192,9 @@ static uint32_t FindMethodIndexIn(ArtMethod* method,
}
static dex::TypeIndex FindClassIndexIn(mirror::Class* cls,
- const DexCompilationUnit& compilation_unit)
+ const DexFile& dex_file,
+ Handle<mirror::DexCache> dex_cache)
REQUIRES_SHARED(Locks::mutator_lock_) {
- const DexFile& dex_file = *compilation_unit.GetDexFile();
dex::TypeIndex index;
if (cls->GetDexCache() == nullptr) {
DCHECK(cls->IsArrayClass()) << cls->PrettyClass();
@@ -203,19 +203,22 @@ static dex::TypeIndex FindClassIndexIn(mirror::Class* cls,
DCHECK(cls->IsProxyClass()) << cls->PrettyClass();
// TODO: deal with proxy classes.
} else if (IsSameDexFile(cls->GetDexFile(), dex_file)) {
- DCHECK_EQ(cls->GetDexCache(), compilation_unit.GetDexCache().Get());
+ DCHECK_EQ(cls->GetDexCache(), dex_cache.Get());
index = cls->GetDexTypeIndex();
+    // Update the dex cache to ensure the class is in it, since the generated code
+    // will assume it is. Updating the dex cache makes this safe: other dex files
+    // might also load the class, and there is no guarantee that the dex cache of
+    // the class's own dex file will be updated.
+ if (dex_cache->GetResolvedType(index) == nullptr) {
+ dex_cache->SetResolvedType(index, cls);
+ }
} else {
index = cls->FindTypeIndexInOtherDexFile(dex_file);
- // We cannot guarantee the entry will resolve to the same class,
+ // We cannot guarantee the entry in the dex cache will resolve to the same class,
// as there may be different class loaders. So only return the index if it's
- // the right class already resolved with the class loader.
- if (index.IsValid()) {
- ObjPtr<mirror::Class> resolved = ClassLinker::LookupResolvedType(
- index, compilation_unit.GetDexCache().Get(), compilation_unit.GetClassLoader().Get());
- if (resolved != cls) {
- index = dex::TypeIndex::Invalid();
- }
+ // the right class in the dex cache already.
+ if (index.IsValid() && dex_cache->GetResolvedType(index) != cls) {
+ index = dex::TypeIndex::Invalid();
}
}
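The dex cache update above is a plain check-then-set, so an entry resolved through some
other path is never overwritten. A reduced sketch (hypothetical stand-in types, not the
mirror::DexCache API):

#include <cstddef>
#include <vector>

struct Class {};

struct DexCache {
  explicit DexCache(size_t num_types) : resolved_types_(num_types, nullptr) {}
  Class* GetResolvedType(size_t index) const { return resolved_types_[index]; }
  void SetResolvedType(size_t index, Class* cls) { resolved_types_[index] = cls; }
 private:
  std::vector<Class*> resolved_types_;
};

// Install `cls` only if the slot is still empty, so generated code may assume the
// type is resolved without clobbering an existing entry.
void EnsureResolvedType(DexCache* dex_cache, size_t index, Class* cls) {
  if (dex_cache->GetResolvedType(index) == nullptr) {
    dex_cache->SetResolvedType(index, cls);
  }
}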
@@ -249,20 +252,25 @@ class ScopedProfilingInfoInlineUse {
ProfilingInfo* const profiling_info_;
};
-static bool IsMonomorphic(Handle<mirror::ObjectArray<mirror::Class>> classes)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- DCHECK_GE(InlineCache::kIndividualCacheSize, 2);
- return classes->Get(0) != nullptr && classes->Get(1) == nullptr;
-}
-
-static bool IsMegamorphic(Handle<mirror::ObjectArray<mirror::Class>> classes)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- for (size_t i = 0; i < InlineCache::kIndividualCacheSize; ++i) {
- if (classes->Get(i) == nullptr) {
- return false;
+HInliner::InlineCacheType HInliner::GetInlineCacheType(
+ const Handle<mirror::ObjectArray<mirror::Class>>& classes)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ uint8_t number_of_types = 0;
+ for (; number_of_types < InlineCache::kIndividualCacheSize; ++number_of_types) {
+ if (classes->Get(number_of_types) == nullptr) {
+ break;
}
}
- return true;
+
+ if (number_of_types == 0) {
+ return kInlineCacheUninitialized;
+ } else if (number_of_types == 1) {
+ return kInlineCacheMonomorphic;
+ } else if (number_of_types == InlineCache::kIndividualCacheSize) {
+ return kInlineCacheMegamorphic;
+ } else {
+ return kInlineCachePolymorphic;
+ }
}
static mirror::Class* GetMonomorphicType(Handle<mirror::ObjectArray<mirror::Class>> classes)
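A standalone model (plain C++, not ART code; the cache size constant is illustrative) of
the classification GetInlineCacheType performs on the fixed-size inline cache:

#include <array>
#include <cassert>
#include <cstddef>

enum InlineCacheType { kUninitialized, kMonomorphic, kPolymorphic, kMegamorphic };

constexpr size_t kIndividualCacheSize = 5;  // stands in for InlineCache::kIndividualCacheSize

InlineCacheType Classify(const std::array<const void*, kIndividualCacheSize>& classes) {
  size_t number_of_types = 0;
  while (number_of_types < kIndividualCacheSize && classes[number_of_types] != nullptr) {
    ++number_of_types;
  }
  if (number_of_types == 0) return kUninitialized;
  if (number_of_types == 1) return kMonomorphic;
  if (number_of_types == kIndividualCacheSize) return kMegamorphic;
  return kPolymorphic;
}

int main() {
  int a, b;
  assert(Classify({nullptr, nullptr, nullptr, nullptr, nullptr}) == kUninitialized);
  assert(Classify({&a, nullptr, nullptr, nullptr, nullptr}) == kMonomorphic);
  assert(Classify({&a, &b, nullptr, nullptr, nullptr}) == kPolymorphic);
  assert(Classify({&a, &b, &a, &b, &a}) == kMegamorphic);
  return 0;
}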
@@ -271,18 +279,6 @@ static mirror::Class* GetMonomorphicType(Handle<mirror::ObjectArray<mirror::Clas
return classes->Get(0);
}
-static bool IsUninitialized(Handle<mirror::ObjectArray<mirror::Class>> classes)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- return classes->Get(0) == nullptr;
-}
-
-static bool IsPolymorphic(Handle<mirror::ObjectArray<mirror::Class>> classes)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- DCHECK_GE(InlineCache::kIndividualCacheSize, 3);
- return classes->Get(1) != nullptr &&
- classes->Get(InlineCache::kIndividualCacheSize - 1) == nullptr;
-}
-
ArtMethod* HInliner::TryCHADevirtualization(ArtMethod* resolved_method) {
if (!resolved_method->HasSingleImplementation()) {
return nullptr;
@@ -353,67 +349,206 @@ bool HInliner::TryInline(HInvoke* invoke_instruction) {
}
return result;
}
-
DCHECK(!invoke_instruction->IsInvokeStaticOrDirect());
- // Check if we can use an inline cache.
- ArtMethod* caller = graph_->GetArtMethod();
- if (Runtime::Current()->UseJitCompilation()) {
- // Under JIT, we should always know the caller.
- DCHECK(caller != nullptr);
- ScopedProfilingInfoInlineUse spiis(caller, soa.Self());
- ProfilingInfo* profiling_info = spiis.GetProfilingInfo();
- if (profiling_info != nullptr) {
- StackHandleScope<1> hs(soa.Self());
- ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker();
- Handle<mirror::ObjectArray<mirror::Class>> inline_cache = hs.NewHandle(
- mirror::ObjectArray<mirror::Class>::Alloc(
- soa.Self(),
- class_linker->GetClassRoot(ClassLinker::kClassArrayClass),
- InlineCache::kIndividualCacheSize));
- if (inline_cache == nullptr) {
- // We got an OOME. Just clear the exception, and don't inline.
- DCHECK(soa.Self()->IsExceptionPending());
- soa.Self()->ClearException();
- VLOG(compiler) << "Out of memory in the compiler when trying to inline";
- return false;
+ // Try using inline caches.
+ return TryInlineFromInlineCache(caller_dex_file, invoke_instruction, resolved_method);
+}
+
+static Handle<mirror::ObjectArray<mirror::Class>> AllocateInlineCacheHolder(
+ const DexCompilationUnit& compilation_unit,
+ StackHandleScope<1>* hs)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ Thread* self = Thread::Current();
+ ClassLinker* class_linker = compilation_unit.GetClassLinker();
+ Handle<mirror::ObjectArray<mirror::Class>> inline_cache = hs->NewHandle(
+ mirror::ObjectArray<mirror::Class>::Alloc(
+ self,
+ class_linker->GetClassRoot(ClassLinker::kClassArrayClass),
+ InlineCache::kIndividualCacheSize));
+ if (inline_cache == nullptr) {
+ // We got an OOME. Just clear the exception, and don't inline.
+ DCHECK(self->IsExceptionPending());
+ self->ClearException();
+ VLOG(compiler) << "Out of memory in the compiler when trying to inline";
+ }
+ return inline_cache;
+}
+
+bool HInliner::TryInlineFromInlineCache(const DexFile& caller_dex_file,
+ HInvoke* invoke_instruction,
+ ArtMethod* resolved_method)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ StackHandleScope<1> hs(Thread::Current());
+ Handle<mirror::ObjectArray<mirror::Class>> inline_cache;
+ InlineCacheType inline_cache_type = Runtime::Current()->IsAotCompiler()
+ ? GetInlineCacheAOT(caller_dex_file, invoke_instruction, &hs, &inline_cache)
+ : GetInlineCacheJIT(invoke_instruction, &hs, &inline_cache);
+
+ switch (inline_cache_type) {
+ case kInlineCacheNoData:
+ break;
+
+ case kInlineCacheUninitialized:
+ VLOG(compiler) << "Interface or virtual call to "
+ << caller_dex_file.PrettyMethod(invoke_instruction->GetDexMethodIndex())
+ << " is not hit and not inlined";
+ return false;
+
+ case kInlineCacheMonomorphic:
+ MaybeRecordStat(kMonomorphicCall);
+ if (outermost_graph_->IsCompilingOsr()) {
+ // If we are compiling OSR, we pretend this call is polymorphic, as we may come from the
+ // interpreter and it may have seen different receiver types.
+ return TryInlinePolymorphicCall(invoke_instruction, resolved_method, inline_cache);
} else {
- Runtime::Current()->GetJit()->GetCodeCache()->CopyInlineCacheInto(
- *profiling_info->GetInlineCache(invoke_instruction->GetDexPc()),
- inline_cache);
- if (IsUninitialized(inline_cache)) {
- VLOG(compiler) << "Interface or virtual call to "
- << caller_dex_file.PrettyMethod(method_index)
- << " is not hit and not inlined";
- return false;
- } else if (IsMonomorphic(inline_cache)) {
- MaybeRecordStat(kMonomorphicCall);
- if (outermost_graph_->IsCompilingOsr()) {
- // If we are compiling OSR, we pretend this call is polymorphic, as we may come from the
- // interpreter and it may have seen different receiver types.
- return TryInlinePolymorphicCall(invoke_instruction, resolved_method, inline_cache);
- } else {
- return TryInlineMonomorphicCall(invoke_instruction, resolved_method, inline_cache);
- }
- } else if (IsPolymorphic(inline_cache)) {
- MaybeRecordStat(kPolymorphicCall);
- return TryInlinePolymorphicCall(invoke_instruction, resolved_method, inline_cache);
- } else {
- DCHECK(IsMegamorphic(inline_cache));
- VLOG(compiler) << "Interface or virtual call to "
- << caller_dex_file.PrettyMethod(method_index)
- << " is megamorphic and not inlined";
- MaybeRecordStat(kMegamorphicCall);
- return false;
- }
+ return TryInlineMonomorphicCall(invoke_instruction, resolved_method, inline_cache);
+ }
+
+ case kInlineCachePolymorphic:
+ MaybeRecordStat(kPolymorphicCall);
+ return TryInlinePolymorphicCall(invoke_instruction, resolved_method, inline_cache);
+
+ case kInlineCacheMegamorphic:
+ VLOG(compiler) << "Interface or virtual call to "
+ << caller_dex_file.PrettyMethod(invoke_instruction->GetDexMethodIndex())
+ << " is megamorphic and not inlined";
+ MaybeRecordStat(kMegamorphicCall);
+ return false;
+
+ case kInlineCacheMissingTypes:
+ VLOG(compiler) << "Interface or virtual call to "
+ << caller_dex_file.PrettyMethod(invoke_instruction->GetDexMethodIndex())
+ << " is missing types and not inlined";
+ return false;
+ }
+ UNREACHABLE();
+}
+
+HInliner::InlineCacheType HInliner::GetInlineCacheJIT(
+ HInvoke* invoke_instruction,
+ StackHandleScope<1>* hs,
+ /*out*/Handle<mirror::ObjectArray<mirror::Class>>* inline_cache)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(Runtime::Current()->UseJitCompilation());
+
+ ArtMethod* caller = graph_->GetArtMethod();
+ // Under JIT, we should always know the caller.
+ DCHECK(caller != nullptr);
+ ScopedProfilingInfoInlineUse spiis(caller, Thread::Current());
+ ProfilingInfo* profiling_info = spiis.GetProfilingInfo();
+
+ if (profiling_info == nullptr) {
+ return kInlineCacheNoData;
+ }
+
+ *inline_cache = AllocateInlineCacheHolder(caller_compilation_unit_, hs);
+ if (inline_cache->Get() == nullptr) {
+    // We can't extract any data if we failed to allocate.
+ return kInlineCacheNoData;
+ } else {
+ Runtime::Current()->GetJit()->GetCodeCache()->CopyInlineCacheInto(
+ *profiling_info->GetInlineCache(invoke_instruction->GetDexPc()),
+ *inline_cache);
+ return GetInlineCacheType(*inline_cache);
+ }
+}
+
+HInliner::InlineCacheType HInliner::GetInlineCacheAOT(
+ const DexFile& caller_dex_file,
+ HInvoke* invoke_instruction,
+ StackHandleScope<1>* hs,
+ /*out*/Handle<mirror::ObjectArray<mirror::Class>>* inline_cache)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(Runtime::Current()->IsAotCompiler());
+ const ProfileCompilationInfo* pci = compiler_driver_->GetProfileCompilationInfo();
+ if (pci == nullptr) {
+ return kInlineCacheNoData;
+ }
+
+ ProfileCompilationInfo::OfflineProfileMethodInfo offline_profile;
+ bool found = pci->GetMethod(caller_dex_file.GetLocation(),
+ caller_dex_file.GetLocationChecksum(),
+ caller_compilation_unit_.GetDexMethodIndex(),
+ &offline_profile);
+ if (!found) {
+ return kInlineCacheNoData; // no profile information for this invocation.
+ }
+
+ *inline_cache = AllocateInlineCacheHolder(caller_compilation_unit_, hs);
+  if (inline_cache->Get() == nullptr) {
+    // We can't extract any data if we failed to allocate.
+ return kInlineCacheNoData;
+ } else {
+ return ExtractClassesFromOfflineProfile(invoke_instruction,
+ offline_profile,
+ *inline_cache);
+ }
+}
+
+HInliner::InlineCacheType HInliner::ExtractClassesFromOfflineProfile(
+ const HInvoke* invoke_instruction,
+ const ProfileCompilationInfo::OfflineProfileMethodInfo& offline_profile,
+ /*out*/Handle<mirror::ObjectArray<mirror::Class>> inline_cache)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ const auto it = offline_profile.inline_caches.find(invoke_instruction->GetDexPc());
+ if (it == offline_profile.inline_caches.end()) {
+ return kInlineCacheUninitialized;
+ }
+
+ const ProfileCompilationInfo::DexPcData& dex_pc_data = it->second;
+
+ if (dex_pc_data.is_missing_types) {
+ return kInlineCacheMissingTypes;
+ }
+ if (dex_pc_data.is_megamorphic) {
+ return kInlineCacheMegamorphic;
+ }
+
+ DCHECK_LE(dex_pc_data.classes.size(), InlineCache::kIndividualCacheSize);
+ Thread* self = Thread::Current();
+ // We need to resolve the class relative to the containing dex file.
+ // So first, build a mapping from the index of dex file in the profile to
+ // its dex cache. This will avoid repeating the lookup when walking over
+ // the inline cache types.
+ std::vector<ObjPtr<mirror::DexCache>> dex_profile_index_to_dex_cache(
+ offline_profile.dex_references.size());
+ for (size_t i = 0; i < offline_profile.dex_references.size(); i++) {
+ bool found = false;
+ for (const DexFile* dex_file : compiler_driver_->GetDexFilesForOatFile()) {
+ if (offline_profile.dex_references[i].MatchesDex(dex_file)) {
+ dex_profile_index_to_dex_cache[i] =
+ caller_compilation_unit_.GetClassLinker()->FindDexCache(self, *dex_file);
+ found = true;
}
}
+ if (!found) {
+ VLOG(compiler) << "Could not find profiled dex file: "
+ << offline_profile.dex_references[i].dex_location;
+ return kInlineCacheMissingTypes;
+ }
}
- VLOG(compiler) << "Interface or virtual call to "
- << caller_dex_file.PrettyMethod(method_index)
- << " could not be statically determined";
- return false;
+ // Walk over the classes and resolve them. If we cannot find a type we return
+ // kInlineCacheMissingTypes.
+ int ic_index = 0;
+ for (const ProfileCompilationInfo::ClassReference& class_ref : dex_pc_data.classes) {
+ ObjPtr<mirror::DexCache> dex_cache =
+ dex_profile_index_to_dex_cache[class_ref.dex_profile_index];
+ DCHECK(dex_cache != nullptr);
+ ObjPtr<mirror::Class> clazz = dex_cache->GetResolvedType(class_ref.type_index);
+ if (clazz != nullptr) {
+ inline_cache->Set(ic_index++, clazz);
+ } else {
+ VLOG(compiler) << "Could not resolve class from inline cache in AOT mode "
+ << caller_compilation_unit_.GetDexFile()->PrettyMethod(
+ invoke_instruction->GetDexMethodIndex()) << " : "
+ << caller_compilation_unit_
+ .GetDexFile()->StringByTypeIdx(class_ref.type_index);
+ return kInlineCacheMissingTypes;
+ }
+ }
+ return GetInlineCacheType(inline_cache);
}
HInstanceFieldGet* HInliner::BuildGetReceiverClass(ClassLinker* class_linker,
@@ -442,8 +577,9 @@ bool HInliner::TryInlineMonomorphicCall(HInvoke* invoke_instruction,
DCHECK(invoke_instruction->IsInvokeVirtual() || invoke_instruction->IsInvokeInterface())
<< invoke_instruction->DebugName();
+ const DexFile& caller_dex_file = *caller_compilation_unit_.GetDexFile();
dex::TypeIndex class_index = FindClassIndexIn(
- GetMonomorphicType(classes), caller_compilation_unit_);
+ GetMonomorphicType(classes), caller_dex_file, caller_compilation_unit_.GetDexCache());
if (!class_index.IsValid()) {
VLOG(compiler) << "Call to " << ArtMethod::PrettyMethod(resolved_method)
<< " from inline cache is not inlined because its class is not"
@@ -486,7 +622,6 @@ bool HInliner::TryInlineMonomorphicCall(HInvoke* invoke_instruction,
// Run type propagation to get the guard typed, and eventually propagate the
// type of the receiver.
ReferenceTypePropagation rtp_fixup(graph_,
- outer_compilation_unit_.GetClassLoader(),
outer_compilation_unit_.GetDexCache(),
handles_,
/* is_first_run */ false);
@@ -556,6 +691,13 @@ HInstruction* HInliner::AddTypeGuard(HInstruction* receiver,
// Insert before setting the kind, as setting the kind affects the inputs.
bb_cursor->InsertInstructionAfter(load_class, receiver_class);
load_class->SetLoadKind(kind);
+ // In AOT mode, we will most likely load the class from BSS, which will involve a call
+ // to the runtime. In this case, the load instruction will need an environment so copy
+ // it from the invoke instruction.
+ if (load_class->NeedsEnvironment()) {
+ DCHECK(Runtime::Current()->IsAotCompiler());
+ load_class->CopyEnvironmentFrom(invoke_instruction->GetEnvironment());
+ }
HNotEqual* compare = new (graph_->GetArena()) HNotEqual(load_class, receiver_class);
bb_cursor->InsertInstructionAfter(compare, load_class);
@@ -580,6 +722,7 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction,
ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker();
PointerSize pointer_size = class_linker->GetImagePointerSize();
+ const DexFile& caller_dex_file = *caller_compilation_unit_.GetDexFile();
bool all_targets_inlined = true;
bool one_target_inlined = false;
@@ -601,7 +744,8 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction,
HInstruction* cursor = invoke_instruction->GetPrevious();
HBasicBlock* bb_cursor = invoke_instruction->GetBlock();
- dex::TypeIndex class_index = FindClassIndexIn(handle.Get(), caller_compilation_unit_);
+ dex::TypeIndex class_index = FindClassIndexIn(
+ handle.Get(), caller_dex_file, caller_compilation_unit_.GetDexCache());
HInstruction* return_replacement = nullptr;
if (!class_index.IsValid() ||
!TryBuildAndInline(invoke_instruction,
@@ -657,7 +801,6 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction,
// Run type propagation to get the guards typed.
ReferenceTypePropagation rtp_fixup(graph_,
- outer_compilation_unit_.GetClassLoader(),
outer_compilation_unit_.GetDexCache(),
handles_,
/* is_first_run */ false);
@@ -746,7 +889,10 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget(
ArtMethod* resolved_method,
Handle<mirror::ObjectArray<mirror::Class>> classes) {
// This optimization only works under JIT for now.
- DCHECK(Runtime::Current()->UseJitCompilation());
+ if (!Runtime::Current()->UseJitCompilation()) {
+ return false;
+ }
+
if (graph_->GetInstructionSet() == kMips64) {
// TODO: Support HClassTableGet for mips64.
return false;
@@ -851,7 +997,6 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget(
// Run type propagation to get the guard typed.
ReferenceTypePropagation rtp_fixup(graph_,
- outer_compilation_unit_.GetClassLoader(),
outer_compilation_unit_.GetDexCache(),
handles_,
/* is_first_run */ false);
@@ -920,7 +1065,6 @@ bool HInliner::TryInlineAndReplace(HInvoke* invoke_instruction,
// Actual return value has a more specific type than the method's declared
// return type. Run RTP again on the outer graph to propagate it.
ReferenceTypePropagation(graph_,
- outer_compilation_unit_.GetClassLoader(),
outer_compilation_unit_.GetDexCache(),
handles_,
/* is_first_run */ false).Run();
@@ -1173,11 +1317,7 @@ HInstanceFieldGet* HInliner::CreateInstanceFieldGet(Handle<mirror::DexCache> dex
/* dex_pc */ 0);
if (iget->GetType() == Primitive::kPrimNot) {
// Use the same dex_cache that we used for field lookup as the hint_dex_cache.
- ReferenceTypePropagation rtp(graph_,
- outer_compilation_unit_.GetClassLoader(),
- dex_cache,
- handles_,
- /* is_first_run */ false);
+ ReferenceTypePropagation rtp(graph_, dex_cache, handles_, /* is_first_run */ false);
rtp.Visit(iget);
}
return iget;
@@ -1223,7 +1363,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
resolved_method->GetDeclaringClass()->GetClassLoader()));
DexCompilationUnit dex_compilation_unit(
- class_loader,
+ class_loader.ToJObject(),
class_linker,
callee_dex_file,
code_item,
@@ -1347,7 +1487,6 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
// are more specific than the declared ones, run RTP again on the inner graph.
if (run_rtp || ArgumentTypesMoreSpecific(invoke_instruction, resolved_method)) {
ReferenceTypePropagation(callee_graph,
- outer_compilation_unit_.GetClassLoader(),
dex_compilation_unit.GetDexCache(),
handles_,
/* is_first_run */ false).Run();
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 75d025ae41..8f8b268cb2 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -20,6 +20,7 @@
#include "dex_file_types.h"
#include "invoke_type.h"
#include "optimization.h"
+#include "jit/profile_compilation_info.h"
namespace art {
@@ -59,6 +60,15 @@ class HInliner : public HOptimization {
static constexpr const char* kInlinerPassName = "inliner";
private:
+ enum InlineCacheType {
+ kInlineCacheNoData = 0,
+ kInlineCacheUninitialized = 1,
+ kInlineCacheMonomorphic = 2,
+ kInlineCachePolymorphic = 3,
+ kInlineCacheMegamorphic = 4,
+ kInlineCacheMissingTypes = 5
+ };
+
bool TryInline(HInvoke* invoke_instruction);
// Try to inline `resolved_method` in place of `invoke_instruction`. `do_rtp` is whether
@@ -106,6 +116,45 @@ class HInliner : public HOptimization {
HInstruction* obj,
HInstruction* value);
+ // Try inlining the invoke instruction using inline caches.
+ bool TryInlineFromInlineCache(
+ const DexFile& caller_dex_file,
+ HInvoke* invoke_instruction,
+ ArtMethod* resolved_method)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+  // Try getting the inline cache from the JIT code cache.
+  // Returns the inline cache type, or kInlineCacheNoData if no profiling info
+  // is available or the inline cache could not be allocated.
+ InlineCacheType GetInlineCacheJIT(
+ HInvoke* invoke_instruction,
+ StackHandleScope<1>* hs,
+ /*out*/Handle<mirror::ObjectArray<mirror::Class>>* inline_cache)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+  // Try getting the inline cache from the AOT offline profile.
+  // Returns the inline cache type, or kInlineCacheNoData if the profile has no
+  // data for this invoke or the inline cache could not be allocated.
+ InlineCacheType GetInlineCacheAOT(const DexFile& caller_dex_file,
+ HInvoke* invoke_instruction,
+ StackHandleScope<1>* hs,
+ /*out*/Handle<mirror::ObjectArray<mirror::Class>>* inline_cache)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+  // Extract the mirror classes from the offline profile and add them to the `inline_cache`.
+  // Note that even if we have profile data for the invoke, the inline_cache might contain
+  // only null entries if the types cannot be resolved.
+ InlineCacheType ExtractClassesFromOfflineProfile(
+ const HInvoke* invoke_instruction,
+ const ProfileCompilationInfo::OfflineProfileMethodInfo& offline_profile,
+ /*out*/Handle<mirror::ObjectArray<mirror::Class>> inline_cache)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ // Compute the inline cache type.
+ InlineCacheType GetInlineCacheType(
+ const Handle<mirror::ObjectArray<mirror::Class>>& classes)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
// Try to inline the target of a monomorphic call. If successful, the code
// in the graph will look like:
// if (receiver.getClass() != ic.GetMonomorphicType()) deopt
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index 88f67fae04..1053da44d6 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -677,10 +677,11 @@ static InvokeType GetInvokeTypeFromOpCode(Instruction::Code opcode) {
ArtMethod* HInstructionBuilder::ResolveMethod(uint16_t method_idx, InvokeType invoke_type) {
ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<2> hs(soa.Self());
+ StackHandleScope<3> hs(soa.Self());
ClassLinker* class_linker = dex_compilation_unit_->GetClassLinker();
- Handle<mirror::ClassLoader> class_loader = dex_compilation_unit_->GetClassLoader();
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
+ soa.Decode<mirror::ClassLoader>(dex_compilation_unit_->GetClassLoader())));
Handle<mirror::Class> compiling_class(hs.NewHandle(GetCompilingClass()));
// We fetch the referenced class eagerly (that is, the class pointed by in the MethodId
// at method_idx), as `CanAccessResolvedMethod` expects it be be in the dex cache.
@@ -1267,7 +1268,9 @@ bool HInstructionBuilder::BuildInstanceFieldAccess(const Instruction& instructio
static mirror::Class* GetClassFrom(CompilerDriver* driver,
const DexCompilationUnit& compilation_unit) {
ScopedObjectAccess soa(Thread::Current());
- Handle<mirror::ClassLoader> class_loader = compilation_unit.GetClassLoader();
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
+ soa.Decode<mirror::ClassLoader>(compilation_unit.GetClassLoader())));
Handle<mirror::DexCache> dex_cache = compilation_unit.GetDexCache();
return driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, &compilation_unit);
@@ -1283,9 +1286,10 @@ mirror::Class* HInstructionBuilder::GetCompilingClass() const {
bool HInstructionBuilder::IsOutermostCompilingClass(dex::TypeIndex type_index) const {
ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<2> hs(soa.Self());
+ StackHandleScope<3> hs(soa.Self());
Handle<mirror::DexCache> dex_cache = dex_compilation_unit_->GetDexCache();
- Handle<mirror::ClassLoader> class_loader = dex_compilation_unit_->GetClassLoader();
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
+ soa.Decode<mirror::ClassLoader>(dex_compilation_unit_->GetClassLoader())));
Handle<mirror::Class> cls(hs.NewHandle(compiler_driver_->ResolveClass(
soa, dex_cache, class_loader, type_index, dex_compilation_unit_)));
Handle<mirror::Class> outer_class(hs.NewHandle(GetOutermostCompilingClass()));
@@ -1321,7 +1325,8 @@ ArtField* HInstructionBuilder::ResolveField(uint16_t field_idx, bool is_static,
StackHandleScope<2> hs(soa.Self());
ClassLinker* class_linker = dex_compilation_unit_->GetClassLinker();
- Handle<mirror::ClassLoader> class_loader = dex_compilation_unit_->GetClassLoader();
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
+ soa.Decode<mirror::ClassLoader>(dex_compilation_unit_->GetClassLoader())));
Handle<mirror::Class> compiling_class(hs.NewHandle(GetCompilingClass()));
ArtField* resolved_field = class_linker->ResolveField(*dex_compilation_unit_->GetDexFile(),
@@ -1638,8 +1643,10 @@ static TypeCheckKind ComputeTypeCheckKind(Handle<mirror::Class> cls)
HLoadClass* HInstructionBuilder::BuildLoadClass(dex::TypeIndex type_index, uint32_t dex_pc) {
ScopedObjectAccess soa(Thread::Current());
+ StackHandleScope<2> hs(soa.Self());
const DexFile& dex_file = *dex_compilation_unit_->GetDexFile();
- Handle<mirror::ClassLoader> class_loader = dex_compilation_unit_->GetClassLoader();
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
+ soa.Decode<mirror::ClassLoader>(dex_compilation_unit_->GetClassLoader())));
Handle<mirror::Class> klass = handles_->NewHandle(compiler_driver_->ResolveClass(
soa, dex_compilation_unit_->GetDexCache(), class_loader, type_index, dex_compilation_unit_));
@@ -1723,9 +1730,17 @@ void HInstructionBuilder::BuildTypeCheck(const Instruction& instruction,
}
}
-bool HInstructionBuilder::NeedsAccessCheck(dex::TypeIndex type_index, bool* finalizable) const {
+bool HInstructionBuilder::NeedsAccessCheck(dex::TypeIndex type_index,
+ Handle<mirror::DexCache> dex_cache,
+ bool* finalizable) const {
return !compiler_driver_->CanAccessInstantiableTypeWithoutChecks(
- LookupReferrerClass(), LookupResolvedType(type_index, *dex_compilation_unit_), finalizable);
+ dex_compilation_unit_->GetDexMethodIndex(), dex_cache, type_index, finalizable);
+}
+
+bool HInstructionBuilder::NeedsAccessCheck(dex::TypeIndex type_index, bool* finalizable) const {
+ ScopedObjectAccess soa(Thread::Current());
+ Handle<mirror::DexCache> dex_cache = dex_compilation_unit_->GetDexCache();
+ return NeedsAccessCheck(type_index, dex_cache, finalizable);
}
bool HInstructionBuilder::CanDecodeQuickenedInfo() const {
@@ -2765,18 +2780,4 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction,
return true;
} // NOLINT(readability/fn_size)
-ObjPtr<mirror::Class> HInstructionBuilder::LookupResolvedType(
- dex::TypeIndex type_index,
- const DexCompilationUnit& compilation_unit) const {
- return ClassLinker::LookupResolvedType(
- type_index, compilation_unit.GetDexCache().Get(), compilation_unit.GetClassLoader().Get());
-}
-
-ObjPtr<mirror::Class> HInstructionBuilder::LookupReferrerClass() const {
- // TODO: Cache the result in a Handle<mirror::Class>.
- const DexFile::MethodId& method_id =
- dex_compilation_unit_->GetDexFile()->GetMethodId(dex_compilation_unit_->GetDexMethodIndex());
- return LookupResolvedType(method_id.class_idx_, *dex_compilation_unit_);
-}
-
} // namespace art
diff --git a/compiler/optimizing/instruction_builder.h b/compiler/optimizing/instruction_builder.h
index 7fdc1883ca..6cb6655252 100644
--- a/compiler/optimizing/instruction_builder.h
+++ b/compiler/optimizing/instruction_builder.h
@@ -110,8 +110,11 @@ class HInstructionBuilder : public ValueObject {
// Returns whether the current method needs access check for the type.
// Output parameter finalizable is set to whether the type is finalizable.
- bool NeedsAccessCheck(dex::TypeIndex type_index, /*out*/bool* finalizable) const
+ bool NeedsAccessCheck(dex::TypeIndex type_index,
+ Handle<mirror::DexCache> dex_cache,
+ /*out*/bool* finalizable) const
REQUIRES_SHARED(Locks::mutator_lock_);
+ bool NeedsAccessCheck(dex::TypeIndex type_index, /*out*/bool* finalizable) const;
template<typename T>
void Unop_12x(const Instruction& instruction, Primitive::Type type, uint32_t dex_pc);
@@ -301,12 +304,6 @@ class HInstructionBuilder : public ValueObject {
// be found.
ArtField* ResolveField(uint16_t field_idx, bool is_static, bool is_put);
- ObjPtr<mirror::Class> LookupResolvedType(dex::TypeIndex type_index,
- const DexCompilationUnit& compilation_unit) const
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- ObjPtr<mirror::Class> LookupReferrerClass() const REQUIRES_SHARED(Locks::mutator_lock_);
-
ArenaAllocator* const arena_;
HGraph* const graph_;
VariableSizedHandleScope* handles_;
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
index 490e50cb77..0e02311672 100644
--- a/compiler/optimizing/optimizing_cfi_test.cc
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -24,17 +24,17 @@
#include "optimizing/code_generator.h"
#include "optimizing/optimizing_unit_test.h"
#include "utils/assembler.h"
-#ifdef ART_USE_OLD_ARM_BACKEND
-#include "utils/arm/assembler_thumb2.h"
-#else
+#ifdef ART_USE_VIXL_ARM_BACKEND
#include "utils/arm/assembler_arm_vixl.h"
+#else
+#include "utils/arm/assembler_thumb2.h"
#endif
#include "utils/mips/assembler_mips.h"
#include "utils/mips64/assembler_mips64.h"
#include "optimizing/optimizing_cfi_test_expected.inc"
-#ifndef ART_USE_OLD_ARM_BACKEND
+#ifdef ART_USE_VIXL_ARM_BACKEND
namespace vixl32 = vixl::aarch32;
using vixl32::r0;
@@ -196,15 +196,7 @@ TEST_F(OptimizingCFITest, kThumb2Adjust) {
expected_cfi_kThumb2_adjust,
expected_cfi_kThumb2_adjust + arraysize(expected_cfi_kThumb2_adjust));
SetUpFrame(kThumb2);
-#ifdef ART_USE_OLD_ARM_BACKEND
-#define __ down_cast<arm::Thumb2Assembler*>(GetCodeGenerator()->GetAssembler())->
- Label target;
- __ CompareAndBranchIfZero(arm::R0, &target);
- // Push the target out of range of CBZ.
- for (size_t i = 0; i != 65; ++i) {
- __ ldr(arm::R0, arm::Address(arm::R0));
- }
-#else
+#ifdef ART_USE_VIXL_ARM_BACKEND
#define __ down_cast<arm::ArmVIXLAssembler*>(GetCodeGenerator() \
->GetAssembler())->GetVIXLAssembler()->
vixl32::Label target;
@@ -213,6 +205,14 @@ TEST_F(OptimizingCFITest, kThumb2Adjust) {
for (size_t i = 0; i != 65; ++i) {
__ Ldr(r0, vixl32::MemOperand(r0));
}
+#else
+#define __ down_cast<arm::Thumb2Assembler*>(GetCodeGenerator()->GetAssembler())->
+ Label target;
+ __ CompareAndBranchIfZero(arm::R0, &target);
+ // Push the target out of range of CBZ.
+ for (size_t i = 0; i != 65; ++i) {
+ __ ldr(arm::R0, arm::Address(arm::R0));
+ }
#endif
__ Bind(&target);
#undef __
diff --git a/compiler/optimizing/optimizing_cfi_test_expected.inc b/compiler/optimizing/optimizing_cfi_test_expected.inc
index d84fe6ccff..82670c38fe 100644
--- a/compiler/optimizing/optimizing_cfi_test_expected.inc
+++ b/compiler/optimizing/optimizing_cfi_test_expected.inc
@@ -223,15 +223,15 @@ static constexpr uint8_t expected_cfi_kMips64[] = {
// 0x00000040: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kThumb2_adjust[] = {
-#ifdef ART_USE_OLD_ARM_BACKEND
- 0x60, 0xB5, 0x2D, 0xED, 0x02, 0x8A, 0x8B, 0xB0, 0x00, 0x28,
- 0x40, 0xD0, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68,
-#else
+#ifdef ART_USE_VIXL_ARM_BACKEND
// VIXL emits an extra 2 bytes here for a 32-bit beq as there is no
// optimistic 16-bit emit and subsequent fixup for out of reach targets
- // as with the old assembler.
+ // as with the current assembler.
0x60, 0xB5, 0x2D, 0xED, 0x02, 0x8A, 0x8B, 0xB0, 0x00, 0x28, 0x00, 0xF0,
0x41, 0x80, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68,
+#else
+ 0x60, 0xB5, 0x2D, 0xED, 0x02, 0x8A, 0x8B, 0xB0, 0x00, 0x28,
+ 0x40, 0xD0, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68,
#endif
0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68,
0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68,
@@ -247,10 +247,10 @@ static constexpr uint8_t expected_asm_kThumb2_adjust[] = {
};
static constexpr uint8_t expected_cfi_kThumb2_adjust[] = {
0x42, 0x0E, 0x0C, 0x85, 0x03, 0x86, 0x02, 0x8E, 0x01, 0x44, 0x0E, 0x14,
-#ifdef ART_USE_OLD_ARM_BACKEND
- 0x05, 0x50, 0x05, 0x05, 0x51, 0x04, 0x42, 0x0E, 0x40, 0x02, 0x86, 0x0A,
-#else
+#ifdef ART_USE_VIXL_ARM_BACKEND
0x05, 0x50, 0x05, 0x05, 0x51, 0x04, 0x42, 0x0E, 0x40, 0x02, 0x88, 0x0A,
+#else
+ 0x05, 0x50, 0x05, 0x05, 0x51, 0x04, 0x42, 0x0E, 0x40, 0x02, 0x86, 0x0A,
#endif
0x42, 0x0E, 0x14, 0x44, 0x0E, 0x0C, 0x06, 0x50, 0x06, 0x51, 0x42, 0x0B,
0x0E, 0x40,
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index d6153b091c..918a027aba 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -307,7 +307,7 @@ class OptimizingCompiler FINAL : public Compiler {
InvokeType invoke_type,
uint16_t class_def_idx,
uint32_t method_idx,
- Handle<mirror::ClassLoader> class_loader,
+ jobject class_loader,
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache) const OVERRIDE;
@@ -376,7 +376,7 @@ class OptimizingCompiler FINAL : public Compiler {
InvokeType invoke_type,
uint16_t class_def_idx,
uint32_t method_idx,
- Handle<mirror::ClassLoader> class_loader,
+ jobject class_loader,
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache,
ArtMethod* method,
@@ -884,7 +884,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena,
InvokeType invoke_type,
uint16_t class_def_idx,
uint32_t method_idx,
- Handle<mirror::ClassLoader> class_loader,
+ jobject class_loader,
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache,
ArtMethod* method,
@@ -955,8 +955,11 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena,
const uint8_t* interpreter_metadata = nullptr;
if (method == nullptr) {
ScopedObjectAccess soa(Thread::Current());
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> loader(hs.NewHandle(
+ soa.Decode<mirror::ClassLoader>(class_loader)));
method = compiler_driver->ResolveMethod(
- soa, dex_cache, class_loader, &dex_compilation_unit, method_idx, invoke_type);
+ soa, dex_cache, loader, &dex_compilation_unit, method_idx, invoke_type);
}
// For AOT compilation, we may not get a method, for example if its class is erroneous.
// JIT should always have a method.
@@ -965,6 +968,16 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena,
graph->SetArtMethod(method);
ScopedObjectAccess soa(Thread::Current());
interpreter_metadata = method->GetQuickenedInfo(class_linker->GetImagePointerSize());
+ dex::TypeIndex type_index = method->GetDeclaringClass()->GetDexTypeIndex();
+
+ // Update the dex cache if the type is not in it yet. Note that under AOT,
+ // the verifier must have set it, but under JIT, there's no guarantee, as we
+ // don't necessarily run the verifier.
+ // The compiler and the compiler driver assume the compiling class is
+ // in the dex cache.
+ if (dex_cache->GetResolvedType(type_index) == nullptr) {
+ dex_cache->SetResolvedType(type_index, method->GetDeclaringClass());
+ }
}
std::unique_ptr<CodeGenerator> codegen(
@@ -1045,7 +1058,7 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
InvokeType invoke_type,
uint16_t class_def_idx,
uint32_t method_idx,
- Handle<mirror::ClassLoader> jclass_loader,
+ jobject jclass_loader,
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache) const {
CompilerDriver* compiler_driver = GetCompilerDriver();
@@ -1159,6 +1172,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache()));
DCHECK(method->IsCompilable());
+ jobject jclass_loader = class_loader.ToJObject();
const DexFile* dex_file = method->GetDexFile();
const uint16_t class_def_idx = method->GetClassDefIndex();
const DexFile::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset());
@@ -1182,7 +1196,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
invoke_type,
class_def_idx,
method_idx,
- class_loader,
+ jclass_loader,
*dex_file,
dex_cache,
method,
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 6e332ca59b..c55fccc7d3 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -65,13 +65,11 @@ ReferenceTypeInfo::TypeHandle ReferenceTypePropagation::HandleCache::GetThrowabl
class ReferenceTypePropagation::RTPVisitor : public HGraphDelegateVisitor {
public:
RTPVisitor(HGraph* graph,
- Handle<mirror::ClassLoader> class_loader,
Handle<mirror::DexCache> hint_dex_cache,
HandleCache* handle_cache,
ArenaVector<HInstruction*>* worklist,
bool is_first_run)
: HGraphDelegateVisitor(graph),
- class_loader_(class_loader),
hint_dex_cache_(hint_dex_cache),
handle_cache_(handle_cache),
worklist_(worklist),
@@ -103,7 +101,6 @@ class ReferenceTypePropagation::RTPVisitor : public HGraphDelegateVisitor {
bool is_exact);
private:
- Handle<mirror::ClassLoader> class_loader_;
Handle<mirror::DexCache> hint_dex_cache_;
HandleCache* handle_cache_;
ArenaVector<HInstruction*>* worklist_;
@@ -111,13 +108,11 @@ class ReferenceTypePropagation::RTPVisitor : public HGraphDelegateVisitor {
};
ReferenceTypePropagation::ReferenceTypePropagation(HGraph* graph,
- Handle<mirror::ClassLoader> class_loader,
Handle<mirror::DexCache> hint_dex_cache,
VariableSizedHandleScope* handles,
bool is_first_run,
const char* name)
: HOptimization(graph, name),
- class_loader_(class_loader),
hint_dex_cache_(hint_dex_cache),
handle_cache_(handles),
worklist_(graph->GetArena()->Adapter(kArenaAllocReferenceTypePropagation)),
@@ -152,12 +147,7 @@ void ReferenceTypePropagation::ValidateTypes() {
}
void ReferenceTypePropagation::Visit(HInstruction* instruction) {
- RTPVisitor visitor(graph_,
- class_loader_,
- hint_dex_cache_,
- &handle_cache_,
- &worklist_,
- is_first_run_);
+ RTPVisitor visitor(graph_, hint_dex_cache_, &handle_cache_, &worklist_, is_first_run_);
instruction->Accept(&visitor);
}
@@ -331,12 +321,7 @@ void ReferenceTypePropagation::Run() {
}
void ReferenceTypePropagation::VisitBasicBlock(HBasicBlock* block) {
- RTPVisitor visitor(graph_,
- class_loader_,
- hint_dex_cache_,
- &handle_cache_,
- &worklist_,
- is_first_run_);
+ RTPVisitor visitor(graph_, hint_dex_cache_, &handle_cache_, &worklist_, is_first_run_);
// Handle Phis first as there might be instructions in the same block who depend on them.
for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
VisitPhi(it.Current()->AsPhi());
@@ -557,9 +542,8 @@ void ReferenceTypePropagation::RTPVisitor::UpdateReferenceTypeInfo(HInstruction*
ScopedObjectAccess soa(Thread::Current());
ObjPtr<mirror::DexCache> dex_cache = FindDexCacheWithHint(soa.Self(), dex_file, hint_dex_cache_);
- ObjPtr<mirror::Class> klass =
- ClassLinker::LookupResolvedType(type_idx, dex_cache, class_loader_.Get());
- SetClassAsTypeInfo(instr, klass, is_exact);
+ // Get type from dex cache assuming it was populated by the verifier.
+ SetClassAsTypeInfo(instr, dex_cache->GetResolvedType(type_idx), is_exact);
}
void ReferenceTypePropagation::RTPVisitor::VisitNewInstance(HNewInstance* instr) {
@@ -572,13 +556,25 @@ void ReferenceTypePropagation::RTPVisitor::VisitNewArray(HNewArray* instr) {
SetClassAsTypeInfo(instr, instr->GetLoadClass()->GetClass().Get(), /* is_exact */ true);
}
+static mirror::Class* GetClassFromDexCache(Thread* self,
+ const DexFile& dex_file,
+ dex::TypeIndex type_idx,
+ Handle<mirror::DexCache> hint_dex_cache)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ObjPtr<mirror::DexCache> dex_cache = FindDexCacheWithHint(self, dex_file, hint_dex_cache);
+ // Get type from dex cache assuming it was populated by the verifier.
+ return dex_cache->GetResolvedType(type_idx);
+}
+
void ReferenceTypePropagation::RTPVisitor::VisitParameterValue(HParameterValue* instr) {
// We check if the existing type is valid: the inliner may have set it.
if (instr->GetType() == Primitive::kPrimNot && !instr->GetReferenceTypeInfo().IsValid()) {
- UpdateReferenceTypeInfo(instr,
- instr->GetTypeIndex(),
- instr->GetDexFile(),
- /* is_exact */ false);
+ ScopedObjectAccess soa(Thread::Current());
+ mirror::Class* resolved_class = GetClassFromDexCache(soa.Self(),
+ instr->GetDexFile(),
+ instr->GetTypeIndex(),
+ hint_dex_cache_);
+ SetClassAsTypeInfo(instr, resolved_class, /* is_exact */ false);
}
}
diff --git a/compiler/optimizing/reference_type_propagation.h b/compiler/optimizing/reference_type_propagation.h
index 215e96786b..4663471729 100644
--- a/compiler/optimizing/reference_type_propagation.h
+++ b/compiler/optimizing/reference_type_propagation.h
@@ -33,7 +33,6 @@ namespace art {
class ReferenceTypePropagation : public HOptimization {
public:
ReferenceTypePropagation(HGraph* graph,
- Handle<mirror::ClassLoader> class_loader,
Handle<mirror::DexCache> hint_dex_cache,
VariableSizedHandleScope* handles,
bool is_first_run,
@@ -106,8 +105,6 @@ class ReferenceTypePropagation : public HOptimization {
void ValidateTypes();
- Handle<mirror::ClassLoader> class_loader_;
-
// Note: hint_dex_cache_ is usually, but not necessarily, the dex cache associated with
// graph_->GetDexFile(). Since we may look up also in other dex files, it's used only
// as a hint, to reduce the number of calls to the costly ClassLinker::FindDexCache().
diff --git a/compiler/optimizing/reference_type_propagation_test.cc b/compiler/optimizing/reference_type_propagation_test.cc
index 84a4bab1a9..b061c871b0 100644
--- a/compiler/optimizing/reference_type_propagation_test.cc
+++ b/compiler/optimizing/reference_type_propagation_test.cc
@@ -38,7 +38,6 @@ class ReferenceTypePropagationTest : public CommonCompilerTest {
void SetupPropagation(VariableSizedHandleScope* handles) {
graph_->InitializeInexactObjectRTI(handles);
propagation_ = new (&allocator_) ReferenceTypePropagation(graph_,
- Handle<mirror::ClassLoader>(),
Handle<mirror::DexCache>(),
handles,
true,
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index 50ab11bc23..487e4dd498 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -499,11 +499,7 @@ GraphAnalysisResult SsaBuilder::BuildSsa() {
// 4) Compute type of reference type instructions. The pass assumes that
// NullConstant has been fixed up.
- ReferenceTypePropagation(graph_,
- class_loader_,
- dex_cache_,
- handles_,
- /* is_first_run */ true).Run();
+ ReferenceTypePropagation(graph_, dex_cache_, handles_, /* is_first_run */ true).Run();
// 5) HInstructionBuilder duplicated ArrayGet instructions with ambiguous type
// (int/float or long/double) and marked ArraySets with ambiguous input type.
diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h
index 978f113ec4..45dac54115 100644
--- a/compiler/optimizing/ssa_builder.h
+++ b/compiler/optimizing/ssa_builder.h
@@ -48,11 +48,9 @@ namespace art {
class SsaBuilder : public ValueObject {
public:
SsaBuilder(HGraph* graph,
- Handle<mirror::ClassLoader> class_loader,
Handle<mirror::DexCache> dex_cache,
VariableSizedHandleScope* handles)
: graph_(graph),
- class_loader_(class_loader),
dex_cache_(dex_cache),
handles_(handles),
agets_fixed_(false),
@@ -117,7 +115,6 @@ class SsaBuilder : public ValueObject {
void RemoveRedundantUninitializedStrings();
HGraph* graph_;
- Handle<mirror::ClassLoader> class_loader_;
Handle<mirror::DexCache> dex_cache_;
VariableSizedHandleScope* const handles_;