Diffstat (limited to 'compiler/optimizing')
21 files changed, 141 insertions, 120 deletions
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index fd396c474c..78a8afb156 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -23,8 +23,6 @@
 #include "base/arena_object.h"
 #include "base/bit_field.h"
 #include "base/enums.h"
-#include "compiled_method.h"
-#include "driver/compiler_options.h"
 #include "globals.h"
 #include "graph_visualizer.h"
 #include "locations.h"
@@ -54,6 +52,7 @@ static int64_t constexpr kPrimLongMax = INT64_C(0x7fffffffffffffff);
 class Assembler;
 class CodeGenerator;
 class CompilerDriver;
+class CompilerOptions;
 class LinkerPatch;
 class ParallelMoveResolver;
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index a07dd6b5ef..ac10e2364a 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -63,9 +63,9 @@ class InvokeRuntimeCallingConvention : public CallingConvention<Register, SRegis
   DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
 };
 
-static constexpr DRegister FromLowSToD(SRegister reg) {
-  return DCHECK_CONSTEXPR(reg % 2 == 0, , D0)
-      static_cast<DRegister>(reg / 2);
+constexpr DRegister FromLowSToD(SRegister reg) {
+  DCHECK_EQ(reg % 2, 0);
+  return static_cast<DRegister>(reg / 2);
 }
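The FromLowSToD() rewrite above leans on C++14 relaxed constexpr: a constexpr function may now contain ordinary statements, so the old single-expression DCHECK_CONSTEXPR contortion becomes a plain check followed by a return. A minimal standalone sketch of the same pattern, with assert() standing in for ART's DCHECK_EQ and illustrative enum values rather than ART's real register definitions:

    #include <cassert>

    enum SRegister { S0, S1, S2, S3 };
    enum DRegister { D0, D1 };

    // C++14 allows statements inside a constexpr function. A failing
    // assert during constant evaluation is a compile-time error; at
    // runtime (in debug builds) it aborts as usual.
    constexpr DRegister FromLowSToD(SRegister reg) {
      assert(reg % 2 == 0);  // only an even S-register is the low half of a D-register
      return static_cast<DRegister>(reg / 2);
    }

    static_assert(FromLowSToD(S2) == D1, "the S2/S3 pair aliases D1");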
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index cec3ca1103..fe6069c242 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -20,6 +20,7 @@
 #include "arch/mips/instruction_set_features_mips.h"
 #include "art_method.h"
 #include "code_generator_utils.h"
+#include "compiled_method.h"
 #include "entrypoints/quick/quick_entrypoints.h"
 #include "entrypoints/quick/quick_entrypoints_enum.h"
 #include "gc/accounting/card_table.h"
diff --git a/compiler/optimizing/constant_folding.cc b/compiler/optimizing/constant_folding.cc
index 0614945ddc..5f39a49d68 100644
--- a/compiler/optimizing/constant_folding.cc
+++ b/compiler/optimizing/constant_folding.cc
@@ -47,6 +47,9 @@ class InstructionWithAbsorbingInputSimplifier : public HGraphVisitor {
  private:
   void VisitShift(HBinaryOperation* shift);
 
+  void VisitEqual(HEqual* instruction) OVERRIDE;
+  void VisitNotEqual(HNotEqual* instruction) OVERRIDE;
+
   void VisitAbove(HAbove* instruction) OVERRIDE;
   void VisitAboveOrEqual(HAboveOrEqual* instruction) OVERRIDE;
   void VisitBelow(HBelow* instruction) OVERRIDE;
@@ -140,6 +143,30 @@ void InstructionWithAbsorbingInputSimplifier::VisitShift(HBinaryOperation* instr
   }
 }
 
+void InstructionWithAbsorbingInputSimplifier::VisitEqual(HEqual* instruction) {
+  if ((instruction->GetLeft()->IsNullConstant() && !instruction->GetRight()->CanBeNull()) ||
+      (instruction->GetRight()->IsNullConstant() && !instruction->GetLeft()->CanBeNull())) {
+    // Replace code looking like
+    //    EQUAL lhs, null
+    // where lhs cannot be null with
+    //    CONSTANT false
+    instruction->ReplaceWith(GetGraph()->GetConstant(Primitive::kPrimBoolean, 0));
+    instruction->GetBlock()->RemoveInstruction(instruction);
+  }
+}
+
+void InstructionWithAbsorbingInputSimplifier::VisitNotEqual(HNotEqual* instruction) {
+  if ((instruction->GetLeft()->IsNullConstant() && !instruction->GetRight()->CanBeNull()) ||
+      (instruction->GetRight()->IsNullConstant() && !instruction->GetLeft()->CanBeNull())) {
+    // Replace code looking like
+    //    NOT_EQUAL lhs, null
+    // where lhs cannot be null with
+    //    CONSTANT true
+    instruction->ReplaceWith(GetGraph()->GetConstant(Primitive::kPrimBoolean, 1));
+    instruction->GetBlock()->RemoveInstruction(instruction);
+  }
+}
+
 void InstructionWithAbsorbingInputSimplifier::VisitAbove(HAbove* instruction) {
   if (instruction->GetLeft()->IsConstant() &&
       instruction->GetLeft()->AsConstant()->IsArithmeticZero()) {
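The two visitors added to constant_folding.cc fold a comparison against null when the other operand is provably non-null (for instance, the result of a NewInstance). A self-contained sketch of the decision rule, with the HInstruction queries reduced to two flags; the names here are illustrative, not ART's API:

    // x == null folds to false and x != null folds to true whenever one
    // side is the null constant and the other side can never be null.
    struct OperandInfo {
      bool is_null_constant;  // the operand is the literal null
      bool can_be_null;       // the operand might evaluate to null
    };

    enum class Folded { kFalse, kTrue, kCannotFold };

    Folded FoldNullComparison(OperandInfo lhs, OperandInfo rhs, bool is_not_equal) {
      bool null_vs_non_null = (lhs.is_null_constant && !rhs.can_be_null) ||
                              (rhs.is_null_constant && !lhs.can_be_null);
      if (!null_vs_non_null) {
        return Folded::kCannotFold;  // nothing is known; keep the comparison
      }
      return is_not_equal ? Folded::kTrue : Folded::kFalse;
    }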
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 451aa38033..1e5f0b6c75 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -109,7 +109,7 @@ void HInliner::Run() {
 }
 
 static bool IsMethodOrDeclaringClassFinal(ArtMethod* method)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   return method->IsFinal() || method->GetDeclaringClass()->IsFinal();
 }
 
@@ -119,7 +119,7 @@ static bool IsMethodOrDeclaringClassFinal(ArtMethod* method)
  * Return nullptr if the runtime target cannot be proven.
  */
 static ArtMethod* FindVirtualOrInterfaceTarget(HInvoke* invoke, ArtMethod* resolved_method)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   if (IsMethodOrDeclaringClassFinal(resolved_method)) {
     // No need to lookup further, the resolved method will be the target.
     return resolved_method;
@@ -189,7 +189,7 @@ static ArtMethod* FindVirtualOrInterfaceTarget(HInvoke* invoke, ArtMethod* resol
 static uint32_t FindMethodIndexIn(ArtMethod* method,
                                   const DexFile& dex_file,
                                   uint32_t name_and_signature_index)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   if (IsSameDexFile(*method->GetDexFile(), dex_file)) {
     return method->GetDexMethodIndex();
   } else {
@@ -200,7 +200,7 @@ static uint32_t FindMethodIndexIn(ArtMethod* method,
 static uint32_t FindClassIndexIn(mirror::Class* cls,
                                  const DexFile& dex_file,
                                  Handle<mirror::DexCache> dex_cache)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   uint32_t index = DexFile::kDexNoIndex;
   if (cls->GetDexCache() == nullptr) {
     DCHECK(cls->IsArrayClass()) << PrettyClass(cls);
@@ -894,7 +894,7 @@ bool HInliner::TryBuildAndInline(HInvoke* invoke_instruction,
 
 static HInstruction* GetInvokeInputForArgVRegIndex(HInvoke* invoke_instruction,
                                                    size_t arg_vreg_index)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   size_t input_index = 0;
   for (size_t i = 0; i < arg_vreg_index; ++i, ++input_index) {
     DCHECK_LT(input_index, invoke_instruction->GetNumberOfArguments());
@@ -1030,7 +1030,7 @@ bool HInliner::TryPatternSubstitution(HInvoke* invoke_instruction,
 HInstanceFieldGet* HInliner::CreateInstanceFieldGet(Handle<mirror::DexCache> dex_cache,
                                                     uint32_t field_index,
                                                     HInstruction* obj)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   PointerSize pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
   ArtField* resolved_field = dex_cache->GetResolvedField(field_index, pointer_size);
   DCHECK(resolved_field != nullptr);
@@ -1058,7 +1058,7 @@ HInstanceFieldSet* HInliner::CreateInstanceFieldSet(Handle<mirror::DexCache> dex
                                                     uint32_t field_index,
                                                     HInstruction* obj,
                                                     HInstruction* value)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   PointerSize pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
   ArtField* resolved_field = dex_cache->GetResolvedField(field_index, pointer_size);
   DCHECK(resolved_field != nullptr);
@@ -1374,7 +1374,7 @@ size_t HInliner::RunOptimizations(HGraph* callee_graph,
 static bool IsReferenceTypeRefinement(ReferenceTypeInfo declared_rti,
                                       bool declared_can_be_null,
                                       HInstruction* actual_obj)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   if (declared_can_be_null && !actual_obj->CanBeNull()) {
     return true;
   }
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 02d3a5f499..486626b1fe 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -64,12 +64,12 @@ class HInliner : public HOptimization {
   // reference type propagation can run after the inlining. If the inlining is successful, this
   // method will replace and remove the `invoke_instruction`.
   bool TryInlineAndReplace(HInvoke* invoke_instruction, ArtMethod* resolved_method, bool do_rtp)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool TryBuildAndInline(HInvoke* invoke_instruction,
                          ArtMethod* resolved_method,
                          HInstruction** return_replacement)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool TryBuildAndInlineHelper(HInvoke* invoke_instruction,
                                ArtMethod* resolved_method,
@@ -86,7 +86,7 @@ class HInliner : public HOptimization {
   bool TryPatternSubstitution(HInvoke* invoke_instruction,
                               ArtMethod* resolved_method,
                               HInstruction** return_replacement)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Create a new HInstanceFieldGet.
   HInstanceFieldGet* CreateInstanceFieldGet(Handle<mirror::DexCache> dex_cache,
@@ -105,38 +105,38 @@ class HInliner : public HOptimization {
   bool TryInlineMonomorphicCall(HInvoke* invoke_instruction,
                                 ArtMethod* resolved_method,
                                 const InlineCache& ic)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Try to inline targets of a polymorphic call.
   bool TryInlinePolymorphicCall(HInvoke* invoke_instruction,
                                 ArtMethod* resolved_method,
                                 const InlineCache& ic)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool TryInlinePolymorphicCallToSameTarget(HInvoke* invoke_instruction,
                                             ArtMethod* resolved_method,
                                             const InlineCache& ic)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   HInstanceFieldGet* BuildGetReceiverClass(ClassLinker* class_linker,
                                            HInstruction* receiver,
                                            uint32_t dex_pc) const
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   void FixUpReturnReferenceType(ArtMethod* resolved_method, HInstruction* return_replacement)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Creates an instance of ReferenceTypeInfo from `klass` if `klass` is
   // admissible (see ReferenceTypePropagation::IsAdmissible for details).
   // Otherwise returns inexact Object RTI.
-  ReferenceTypeInfo GetClassRTI(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+  ReferenceTypeInfo GetClassRTI(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool ArgumentTypesMoreSpecific(HInvoke* invoke_instruction, ArtMethod* resolved_method)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool ReturnTypeMoreSpecific(HInvoke* invoke_instruction, HInstruction* return_replacement)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Add a type guard on the given `receiver`. This will add to the graph:
   //   i0 = HFieldGet(receiver, klass)
@@ -154,7 +154,7 @@ class HInliner : public HOptimization {
                             bool is_referrer,
                             HInstruction* invoke_instruction,
                             bool with_deoptimization)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   /*
    * Ad-hoc implementation for implementing a diamond pattern in the graph for
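Most of the churn in inliner.cc and inliner.h (and in the files below) is the mechanical rename of SHARED_REQUIRES to REQUIRES_SHARED, matching Clang's -Wthread-safety spelling: the function requires the capability in shared (reader) mode. A compilable sketch of how such macros are typically wired up, assuming Clang's thread-safety attributes; the macro bodies and the class here are illustrative, not ART's base/macros.h:

    #define CAPABILITY(x)         __attribute__((capability(x)))
    #define REQUIRES_SHARED(...)  __attribute__((requires_shared_capability(__VA_ARGS__)))
    #define ACQUIRE_SHARED(...)   __attribute__((acquire_shared_capability(__VA_ARGS__)))
    #define RELEASE_SHARED(...)   __attribute__((release_shared_capability(__VA_ARGS__)))

    class CAPABILITY("mutex") ReaderWriterMutex {
     public:
      void SharedLock() ACQUIRE_SHARED();    // implemented elsewhere
      void SharedUnlock() RELEASE_SHARED();  // implemented elsewhere
    };

    ReaderWriterMutex mutator_lock;

    // -Wthread-safety reports any caller that does not hold mutator_lock
    // in at least shared (reader) mode.
    void InspectHeap() REQUIRES_SHARED(mutator_lock);

    void Caller() {
      mutator_lock.SharedLock();
      InspectHeap();               // ok: lock held in shared mode
      mutator_lock.SharedUnlock();
    }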
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index e5dab569fd..453068b560 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -957,7 +957,7 @@ bool HInstructionBuilder::BuildNewInstance(uint16_t type_index, uint32_t dex_pc)
 }
 
 static bool IsSubClass(mirror::Class* to_test, mirror::Class* super_class)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   return to_test != nullptr && !to_test->IsInterface() && to_test->IsSubClass(super_class);
 }
 
@@ -1607,7 +1607,7 @@ void HInstructionBuilder::BuildFillWideArrayData(HInstruction* object,
 }
 
 static TypeCheckKind ComputeTypeCheckKind(Handle<mirror::Class> cls)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   if (cls.Get() == nullptr) {
     return TypeCheckKind::kUnresolvedCheck;
   } else if (cls->IsInterface()) {
diff --git a/compiler/optimizing/instruction_builder.h b/compiler/optimizing/instruction_builder.h
index 517cf76831..aa34ddd1d1 100644
--- a/compiler/optimizing/instruction_builder.h
+++ b/compiler/optimizing/instruction_builder.h
@@ -103,7 +103,7 @@ class HInstructionBuilder : public ValueObject {
   bool NeedsAccessCheck(uint32_t type_index,
                         Handle<mirror::DexCache> dex_cache,
                         /*out*/bool* finalizable) const
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   bool NeedsAccessCheck(uint32_t type_index, /*out*/bool* finalizable) const;
 
   template<typename T>
@@ -255,14 +255,14 @@ class HInstructionBuilder : public ValueObject {
                         ArtMethod* method,
                         uint32_t method_idx,
                         HInvokeStaticOrDirect::ClinitCheckRequirement* clinit_check_requirement)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Build a HNewInstance instruction.
   bool BuildNewInstance(uint16_t type_index, uint32_t dex_pc);
 
   // Return whether the compiler can assume `cls` is initialized.
   bool IsInitialized(Handle<mirror::Class> cls) const
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Try to resolve a method using the class linker. Return null if a method could
   // not be resolved.
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 99ad898fd6..5239f8f020 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -634,7 +634,7 @@ static void GenBitCount(LocationSummary* locations,
   // For 64-bit quantities, this algorithm gets executed twice, (once
   // for in_lo, and again for in_hi), but saves a few instructions
   // because the mask values only have to be loaded once. Using this
-  // algorithm the count for a 64-bit operand can be performed in 33
+  // algorithm the count for a 64-bit operand can be performed in 29
   // instructions compared to a loop-based algorithm which required 47
   // instructions.
@@ -687,37 +687,36 @@ static void GenBitCount(LocationSummary* locations,
     __ Srl(tmp_lo, tmp_lo, 2);
     __ And(tmp_lo, tmp_lo, AT);
     __ Addu(tmp_lo, out_lo, tmp_lo);
-    __ Srl(out_lo, tmp_lo, 4);
-    __ Addu(out_lo, out_lo, tmp_lo);
 
     __ And(out_hi, tmp_hi, AT);
     __ Srl(tmp_hi, tmp_hi, 2);
     __ And(tmp_hi, tmp_hi, AT);
     __ Addu(tmp_hi, out_hi, tmp_hi);
-    __ Srl(out_hi, tmp_hi, 4);
-    __ Addu(out_hi, out_hi, tmp_hi);
+
+    // Here we deviate from the original algorithm a bit. We've reached
+    // the stage where the bitfields holding the subtotals are large
+    // enough to hold the combined subtotals for both the low word, and
+    // the high word. This means that we can add the subtotals for
+    // the high, and low words into a single word, and compute the final
+    // result for both the high, and low words using fewer instructions.
 
     __ LoadConst32(AT, 0x0F0F0F0F);
-    __ And(out_lo, out_lo, AT);
-    __ And(out_hi, out_hi, AT);
+
+    __ Addu(TMP, tmp_hi, tmp_lo);
+
+    __ Srl(out, TMP, 4);
+    __ And(out, out, AT);
+    __ And(TMP, TMP, AT);
+    __ Addu(out, out, TMP);
 
     __ LoadConst32(AT, 0x01010101);
 
     if (isR6) {
-      __ MulR6(out_lo, out_lo, AT);
-
-      __ MulR6(out_hi, out_hi, AT);
+      __ MulR6(out, out, AT);
     } else {
-      __ MulR2(out_lo, out_lo, AT);
-
-      __ MulR2(out_hi, out_hi, AT);
+      __ MulR2(out, out, AT);
     }
 
-    __ Srl(out_lo, out_lo, 24);
-    __ Srl(out_hi, out_hi, 24);
-
-    __ Addu(out, out_hi, out_lo);
+    __ Srl(out, out, 24);
   }
 }
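The GenBitCount() hunks implement the classic divide-and-conquer population count, and the rewrite merges the high and low words of a 64-bit operand as soon as the subtotal bitfields are wide enough, which is what brings the count from 33 down to 29 instructions. A plain C++ rendering of the same arithmetic, standard bit-twiddling shown for illustration rather than ART code:

    #include <cstdint>

    // Divide-and-conquer population count for one 32-bit word, using the
    // same masks the MIPS code loads (0x55555555, 0x33333333, 0x0F0F0F0F)
    // and the same multiply-by-0x01010101 trick to sum the byte subtotals
    // into the top byte.
    uint32_t PopCount32(uint32_t x) {
      x = x - ((x >> 1) & 0x55555555u);                  // 2-bit subtotals, each <= 2
      x = (x & 0x33333333u) + ((x >> 2) & 0x33333333u);  // 4-bit subtotals, each <= 4
      x = (x + (x >> 4)) & 0x0F0F0F0Fu;                  // 8-bit subtotals, each <= 8
      return (x * 0x01010101u) >> 24;                    // sum of all bytes
    }

    // The trick in this change: after the 4-bit stage every nibble holds
    // at most 4, so the subtotals of the high and low words can be added
    // first (nibbles then hold at most 8) and the remaining stages run
    // once instead of twice.
    uint32_t PopCount64(uint64_t x) {
      uint32_t lo = static_cast<uint32_t>(x);
      uint32_t hi = static_cast<uint32_t>(x >> 32);
      lo = lo - ((lo >> 1) & 0x55555555u);
      hi = hi - ((hi >> 1) & 0x55555555u);
      lo = (lo & 0x33333333u) + ((lo >> 2) & 0x33333333u);
      hi = (hi & 0x33333333u) + ((hi >> 2) & 0x33333333u);
      uint32_t sum = lo + hi;  // combine the words early
      sum = ((sum >> 4) & 0x0F0F0F0Fu) + (sum & 0x0F0F0F0Fu);  // bytes hold <= 16
      return (sum * 0x01010101u) >> 24;  // total <= 64 fits in the top byte
    }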
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 2808e1b5fc..8f37236ede 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -2242,7 +2242,7 @@ void HGraph::TransformLoopHeaderForBCE(HBasicBlock* header) {
 }
 
 static void CheckAgainstUpperBound(ReferenceTypeInfo rti, ReferenceTypeInfo upper_bound_rti)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   if (rti.IsValid()) {
     DCHECK(upper_bound_rti.IsSupertypeOf(rti))
         << " upper_bound_rti: " << upper_bound_rti
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 94913fc562..19e499ba8c 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -171,7 +171,7 @@ class ReferenceTypeInfo : ValueObject {
 
   static ReferenceTypeInfo Create(TypeHandle type_handle, bool is_exact);
 
-  static ReferenceTypeInfo Create(TypeHandle type_handle) SHARED_REQUIRES(Locks::mutator_lock_) {
+  static ReferenceTypeInfo Create(TypeHandle type_handle) REQUIRES_SHARED(Locks::mutator_lock_) {
     return Create(type_handle, type_handle->CannotBeAssignedFromOtherTypes());
   }
 
@@ -191,49 +191,49 @@ class ReferenceTypeInfo : ValueObject {
 
   bool IsExact() const { return is_exact_; }
 
-  bool IsObjectClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsObjectClass() const REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(IsValid());
     return GetTypeHandle()->IsObjectClass();
   }
 
-  bool IsStringClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsStringClass() const REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(IsValid());
     return GetTypeHandle()->IsStringClass();
  }
 
-  bool IsObjectArray() const SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsObjectArray() const REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(IsValid());
     return IsArrayClass() && GetTypeHandle()->GetComponentType()->IsObjectClass();
   }
 
-  bool IsInterface() const SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsInterface() const REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(IsValid());
     return GetTypeHandle()->IsInterface();
   }
 
-  bool IsArrayClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsArrayClass() const REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(IsValid());
     return GetTypeHandle()->IsArrayClass();
   }
 
-  bool IsPrimitiveArrayClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsPrimitiveArrayClass() const REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(IsValid());
     return GetTypeHandle()->IsPrimitiveArray();
   }
 
-  bool IsNonPrimitiveArrayClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsNonPrimitiveArrayClass() const REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(IsValid());
     return GetTypeHandle()->IsArrayClass() && !GetTypeHandle()->IsPrimitiveArray();
   }
 
-  bool CanArrayHold(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool CanArrayHold(ReferenceTypeInfo rti) const REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(IsValid());
     if (!IsExact()) return false;
     if (!IsArrayClass()) return false;
     return GetTypeHandle()->GetComponentType()->IsAssignableFrom(rti.GetTypeHandle().Get());
   }
 
-  bool CanArrayHoldValuesOf(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool CanArrayHoldValuesOf(ReferenceTypeInfo rti) const REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(IsValid());
     if (!IsExact()) return false;
     if (!IsArrayClass()) return false;
@@ -244,13 +244,13 @@ class ReferenceTypeInfo : ValueObject {
 
   Handle<mirror::Class> GetTypeHandle() const { return type_handle_; }
 
-  bool IsSupertypeOf(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsSupertypeOf(ReferenceTypeInfo rti) const REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(IsValid());
     DCHECK(rti.IsValid());
     return GetTypeHandle()->IsAssignableFrom(rti.GetTypeHandle().Get());
   }
 
-  bool IsStrictSupertypeOf(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsStrictSupertypeOf(ReferenceTypeInfo rti) const REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(IsValid());
     DCHECK(rti.IsValid());
     return GetTypeHandle().Get() != rti.GetTypeHandle().Get() &&
@@ -260,7 +260,7 @@ class ReferenceTypeInfo : ValueObject {
   // Returns true if the type information provide the same amount of details.
   // Note that it does not mean that the instructions have the same actual type
   // (because the type can be the result of a merge).
-  bool IsEqual(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsEqual(ReferenceTypeInfo rti) const REQUIRES_SHARED(Locks::mutator_lock_) {
     if (!IsValid() && !rti.IsValid()) {
       // Invalid types are equal.
       return true;
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
index 8c0231e1aa..a1e923bd73 100644
--- a/compiler/optimizing/optimizing_cfi_test.cc
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -19,6 +19,7 @@
 
 #include "arch/instruction_set.h"
 #include "cfi_test.h"
+#include "driver/compiler_options.h"
 #include "gtest/gtest.h"
 #include "optimizing/code_generator.h"
 #include "optimizing/optimizing_unit_test.h"
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 6e98b4d54e..c5d761183a 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -294,7 +294,7 @@ class OptimizingCompiler FINAL : public Compiler {
   }
 
   uintptr_t GetEntryPointOf(ArtMethod* method) const OVERRIDE
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
         InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet())));
   }
@@ -311,7 +311,7 @@ class OptimizingCompiler FINAL : public Compiler {
 
   bool JitCompile(Thread* self, jit::JitCodeCache* code_cache, ArtMethod* method, bool osr)
       OVERRIDE
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
   void RunOptimizations(HGraph* graph,
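The include added to optimizing_cfi_test.cc exists because code_generator.h (first hunk above) now only forward-declares CompilerOptions instead of pulling in driver/compiler_options.h; every translation unit that actually uses the class must include the definition itself (sharpening.cc and x86_memory_gen.cc below do the same). The pattern in miniature; the file names and member below are invented for illustration:

    // widget.h: only mentions CompilerOptions by reference, so a forward
    // declaration suffices and the header dependency disappears.
    class CompilerOptions;

    class Widget {
     public:
      explicit Widget(const CompilerOptions& options) : options_(options) {}
      bool IsDebuggable() const;        // defined where the type is complete
     private:
      const CompilerOptions& options_;  // references/pointers to incomplete types are fine
    };

    // widget.cc: the one place that calls into the type includes the real
    // header (hypothetical member shown purely for illustration).
    //   #include "widget.h"
    //   #include "driver/compiler_options.h"
    //   bool Widget::IsDebuggable() const { return options_.GetDebuggable(); }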
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index e96ab1918c..4289cf7e0f 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -27,7 +27,7 @@ namespace art {
 static inline mirror::DexCache* FindDexCacheWithHint(Thread* self,
                                                      const DexFile& dex_file,
                                                      Handle<mirror::DexCache> hint_dex_cache)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   if (LIKELY(hint_dex_cache->GetDexFile() == &dex_file)) {
     return hint_dex_cache.Get();
   } else {
@@ -85,7 +85,7 @@ class ReferenceTypePropagation::RTPVisitor : public HGraphDelegateVisitor {
   void VisitParameterValue(HParameterValue* instr) OVERRIDE;
   void UpdateFieldAccessTypeInfo(HInstruction* instr, const FieldInfo& info);
   void SetClassAsTypeInfo(HInstruction* instr, mirror::Class* klass, bool is_exact)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
   void VisitInstanceFieldGet(HInstanceFieldGet* instr) OVERRIDE;
   void VisitStaticFieldGet(HStaticFieldGet* instr) OVERRIDE;
   void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instr) OVERRIDE;
@@ -194,7 +194,7 @@ static bool ShouldCreateBoundType(HInstruction* position,
                                   ReferenceTypeInfo upper_bound,
                                   HInstruction* dominator_instr,
                                   HBasicBlock* dominator_block)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   // If the position where we should insert the bound type is not already a
   // a bound type then we need to create one.
   if (position == nullptr || !position->IsBoundType()) {
@@ -487,7 +487,7 @@ static mirror::Class* GetClassFromDexCache(Thread* self,
                                            const DexFile& dex_file,
                                            uint16_t type_idx,
                                            Handle<mirror::DexCache> hint_dex_cache)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   mirror::DexCache* dex_cache = FindDexCacheWithHint(self, dex_file, hint_dex_cache);
   // Get type from dex cache assuming it was populated by the verifier.
   return dex_cache->GetResolvedType(type_idx);
diff --git a/compiler/optimizing/reference_type_propagation.h b/compiler/optimizing/reference_type_propagation.h
index edd83bf5de..1fa6624902 100644
--- a/compiler/optimizing/reference_type_propagation.h
+++ b/compiler/optimizing/reference_type_propagation.h
@@ -44,7 +44,7 @@ class ReferenceTypePropagation : public HOptimization {
 
   // Returns true if klass is admissible to the propagation: non-null and resolved.
   // For an array type, we also check if the component type is admissible.
-  static bool IsAdmissible(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_) {
+  static bool IsAdmissible(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_) {
     return klass != nullptr &&
            klass->IsResolved() &&
            (!klass->IsArrayClass() || IsAdmissible(klass->GetComponentType()));
@@ -58,7 +58,7 @@ class ReferenceTypePropagation : public HOptimization {
     explicit HandleCache(StackHandleScopeCollection* handles) : handles_(handles) { }
 
     template <typename T>
-    MutableHandle<T> NewHandle(T* object) SHARED_REQUIRES(Locks::mutator_lock_) {
+    MutableHandle<T> NewHandle(T* object) REQUIRES_SHARED(Locks::mutator_lock_) {
      return handles_->NewHandle(object);
    }
 
@@ -80,8 +80,8 @@ class ReferenceTypePropagation : public HOptimization {
     void VisitPhi(HPhi* phi);
     void VisitBasicBlock(HBasicBlock* block);
-    void UpdateBoundType(HBoundType* bound_type) SHARED_REQUIRES(Locks::mutator_lock_);
-    void UpdatePhi(HPhi* phi) SHARED_REQUIRES(Locks::mutator_lock_);
+    void UpdateBoundType(HBoundType* bound_type) REQUIRES_SHARED(Locks::mutator_lock_);
+    void UpdatePhi(HPhi* phi) REQUIRES_SHARED(Locks::mutator_lock_);
     void BoundTypeForIfNotNull(HBasicBlock* block);
     void BoundTypeForIfInstanceOf(HBasicBlock* block);
     void ProcessWorklist();
@@ -92,10 +92,10 @@ class ReferenceTypePropagation : public HOptimization {
   bool UpdateReferenceTypeInfo(HInstruction* instr);
 
   static void UpdateArrayGet(HArrayGet* instr, HandleCache* handle_cache)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   ReferenceTypeInfo MergeTypes(const ReferenceTypeInfo& a, const ReferenceTypeInfo& b)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   void ValidateTypes();
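FindDexCacheWithHint() in reference_type_propagation.cc above is a hint fast path: the caller usually already holds the right dex cache, so the lookup tries the hint before falling back to the runtime's full search. A generic sketch of the shape of that pattern, with simplified containers rather than ART's types:

    #include <map>
    #include <utility>

    // Try the caller-provided hint first; fall back to the slow lookup.
    template <typename Key, typename Value>
    const Value* FindWithHint(const Key& key,
                              const std::pair<Key, Value>& hint,
                              const std::map<Key, Value>& slow_map) {
      if (hint.first == key) {       // likely case: the hint matches
        return &hint.second;
      }
      auto it = slow_map.find(key);  // slow path
      return it != slow_map.end() ? &it->second : nullptr;
    }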
diff --git a/compiler/optimizing/reference_type_propagation_test.cc b/compiler/optimizing/reference_type_propagation_test.cc
index 7649b5093c..75a4eac538 100644
--- a/compiler/optimizing/reference_type_propagation_test.cc
+++ b/compiler/optimizing/reference_type_propagation_test.cc
@@ -46,7 +46,7 @@ class ReferenceTypePropagationTest : public CommonCompilerTest {
 
   // Relay method to merge type in reference type propagation.
   ReferenceTypeInfo MergeTypes(const ReferenceTypeInfo& a,
-                               const ReferenceTypeInfo& b) SHARED_REQUIRES(Locks::mutator_lock_) {
+                               const ReferenceTypeInfo& b) REQUIRES_SHARED(Locks::mutator_lock_) {
     return propagation_->MergeTypes(a, b);
   }
 
@@ -56,12 +56,12 @@ class ReferenceTypePropagationTest : public CommonCompilerTest {
   }
 
   // Helper method to construct the Object type.
-  ReferenceTypeInfo ObjectType(bool is_exact = true) SHARED_REQUIRES(Locks::mutator_lock_) {
+  ReferenceTypeInfo ObjectType(bool is_exact = true) REQUIRES_SHARED(Locks::mutator_lock_) {
     return ReferenceTypeInfo::Create(propagation_->handle_cache_.GetObjectClassHandle(), is_exact);
   }
 
   // Helper method to construct the String type.
-  ReferenceTypeInfo StringType(bool is_exact = true) SHARED_REQUIRES(Locks::mutator_lock_) {
+  ReferenceTypeInfo StringType(bool is_exact = true) REQUIRES_SHARED(Locks::mutator_lock_) {
     return ReferenceTypeInfo::Create(propagation_->handle_cache_.GetStringClassHandle(), is_exact);
   }
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index 40fff8af32..81163e296e 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -20,6 +20,7 @@
 #include "base/enums.h"
 #include "class_linker.h"
 #include "code_generator.h"
+#include "driver/compiler_options.h"
 #include "driver/dex_compilation_unit.h"
 #include "utils/dex_cache_arrays_layout-inl.h"
 #include "driver/compiler_driver.h"
@@ -295,15 +296,7 @@ void HSharpening::ProcessLoadString(HLoadString* load_string) {
       DCHECK(!runtime->UseJitCompilation());
       mirror::String* string = class_linker->ResolveString(dex_file, string_index, dex_cache);
       CHECK(string != nullptr);
-      if (compiler_driver_->GetSupportBootImageFixup()) {
-        DCHECK(ContainsElement(compiler_driver_->GetDexFilesForOatFile(), &dex_file));
-        desired_load_kind = codegen_->GetCompilerOptions().GetCompilePic()
-            ? HLoadString::LoadKind::kBootImageLinkTimePcRelative
-            : HLoadString::LoadKind::kBootImageLinkTimeAddress;
-      } else {
-        // MIPS64 or compiler_driver_test. Do not sharpen.
-        DCHECK_EQ(desired_load_kind, HLoadString::LoadKind::kDexCacheViaMethod);
-      }
+      // TODO: In follow up CL, add PcRelative and Address back in.
     } else if (runtime->UseJitCompilation()) {
       // TODO: Make sure we don't set the "compile PIC" flag for JIT as that's bogus.
       // DCHECK(!codegen_->GetCompilerOptions().GetCompilePic());
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index 5a574d9af7..f7dc112d00 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -303,7 +303,7 @@ static HArrayGet* CreateFloatOrDoubleEquivalentOfArrayGet(HArrayGet* aget) {
 }
 
 static Primitive::Type GetPrimitiveArrayComponentType(HInstruction* array)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   ReferenceTypeInfo array_type = array->GetReferenceTypeInfo();
   DCHECK(array_type.IsPrimitiveArrayClass());
   return array_type.GetTypeHandle()->GetComponentType()->GetPrimitiveType();
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index a01e107e02..a4d52d7761 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -59,6 +59,38 @@ static void AddToListForLinearization(ArenaVector<HBasicBlock*>* worklist, HBasi
   worklist->insert(insert_pos.base(), block);
 }
 
+static bool IsLinearOrderWellFormed(const HGraph& graph) {
+  for (HBasicBlock* header : graph.GetBlocks()) {
+    if (header == nullptr || !header->IsLoopHeader()) {
+      continue;
+    }
+
+    HLoopInformation* loop = header->GetLoopInformation();
+    size_t num_blocks = loop->GetBlocks().NumSetBits();
+    size_t found_blocks = 0u;
+
+    for (HLinearOrderIterator it(graph); !it.Done(); it.Advance()) {
+      HBasicBlock* current = it.Current();
+      if (loop->Contains(*current)) {
+        found_blocks++;
+        if (found_blocks == 1u && current != header) {
+          // First block is not the header.
+          return false;
+        } else if (found_blocks == num_blocks && !loop->IsBackEdge(*current)) {
+          // Last block is not a back edge.
+          return false;
+        }
+      } else if (found_blocks != 0u && found_blocks != num_blocks) {
+        // Blocks are not adjacent.
+        return false;
+      }
+    }
+    DCHECK_EQ(found_blocks, num_blocks);
+  }
+
+  return true;
+}
+
 void SsaLivenessAnalysis::LinearizeGraph() {
   // Create a reverse post ordering with the following properties:
   //  - Blocks in a loop are consecutive,
@@ -100,6 +132,8 @@ void SsaLivenessAnalysis::LinearizeGraph() {
       forward_predecessors[block_id] = number_of_remaining_predecessors - 1;
     }
   } while (!worklist.empty());
+
+  DCHECK(graph_->HasIrreducibleLoops() || IsLinearOrderWellFormed(*graph_));
 }
 
 void SsaLivenessAnalysis::NumberInstructions() {
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 92788fe6b8..9f94c8316b 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -983,38 +983,6 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> {
     return false;
   }
 
-  bool IsLinearOrderWellFormed(const HGraph& graph) {
-    for (HBasicBlock* header : graph.GetBlocks()) {
-      if (header == nullptr || !header->IsLoopHeader()) {
-        continue;
-      }
-
-      HLoopInformation* loop = header->GetLoopInformation();
-      size_t num_blocks = loop->GetBlocks().NumSetBits();
-      size_t found_blocks = 0u;
-
-      for (HLinearOrderIterator it(graph); !it.Done(); it.Advance()) {
-        HBasicBlock* current = it.Current();
-        if (loop->Contains(*current)) {
-          found_blocks++;
-          if (found_blocks == 1u && current != header) {
-            // First block is not the header.
-            return false;
-          } else if (found_blocks == num_blocks && !loop->IsBackEdge(*current)) {
-            // Last block is not a back edge.
-            return false;
-          }
-        } else if (found_blocks != 0u && found_blocks != num_blocks) {
-          // Blocks are not adjacent.
-          return false;
-        }
-      }
-      DCHECK_EQ(found_blocks, num_blocks);
-    }
-
-    return true;
-  }
-
   void AddBackEdgeUses(const HBasicBlock& block_at_use) {
     DCHECK(block_at_use.IsInLoop());
     if (block_at_use.GetGraph()->HasIrreducibleLoops()) {
@@ -1024,8 +992,6 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> {
       return;
     }
 
-    DCHECK(IsLinearOrderWellFormed(*block_at_use.GetGraph()));
-
     // Add synthesized uses at the back edge of loops to help the register allocator.
     // Note that this method is called in decreasing liveness order, to faciliate adding
     // uses at the head of the `first_use_` linked list. Because below
diff --git a/compiler/optimizing/x86_memory_gen.cc b/compiler/optimizing/x86_memory_gen.cc
index 8aa315a7e3..4e256832a2 100644
--- a/compiler/optimizing/x86_memory_gen.cc
+++ b/compiler/optimizing/x86_memory_gen.cc
@@ -16,6 +16,7 @@
 
 #include "x86_memory_gen.h"
 #include "code_generator.h"
+#include "driver/compiler_options.h"
 
 namespace art {
 namespace x86 {
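With this move, IsLinearOrderWellFormed() leaves LiveInterval for ssa_liveness_analysis.cc and is DCHECKed once, right after LinearizeGraph(), instead of on every AddBackEdgeUses() call. The invariant it verifies: in the linear order, the blocks of each (reducible) loop form one contiguous run that starts at the loop header and ends at a back edge. A simplified standalone model of the contiguity part, where plain ints stand in for HBasicBlock and the back-edge test is omitted:

    #include <unordered_set>
    #include <vector>

    // True if the members of one loop appear as a single unbroken run in
    // the linear order, beginning at the loop header.
    bool LoopBlocksAreContiguous(const std::vector<int>& linear_order,
                                 const std::unordered_set<int>& loop_blocks,
                                 int header) {
      size_t found = 0;
      for (int block : linear_order) {
        if (loop_blocks.count(block) != 0) {
          if (found == 0 && block != header) {
            return false;  // the run must start at the header
          }
          ++found;
        } else if (found != 0 && found != loop_blocks.size()) {
          return false;  // the run was interrupted before it completed
        }
      }
      return found == loop_blocks.size();
    }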