| author | 2025-02-28 20:00:48 -0800 |
|---|---|
| committer | 2025-02-28 20:00:48 -0800 |
| commit | 69afbfef48b6ec50fcdc15bbe2b42ba15bc1d6d6 (patch) |
| tree | 8067fabac21ff7d99693cb7ef7e66a26c2317c38 |
| parent | aa82ffa0068f009ad565e613b1abb216044b1502 (diff) |
| parent | 9903bb581aef568c7fb912fcb4f776f65f71f251 (diff) |
Snap for 13143403 from 9903bb581aef568c7fb912fcb4f776f65f71f251 to 25Q2-release
Change-Id: I044fff03df6bc96888601f645503d6c830eda06c
24 files changed, 144 insertions, 119 deletions
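A pattern that recurs through the compiler hunks below is the replacement of `std::optional<ArenaBitVector>` members and locals with `ArenaBitVector::CreateFixedSize(...)`, which yields a `BitVectorView<size_t>`: an empty view (`SizeInBits() == 0`) stands in for `std::nullopt`, and the view is cheap to copy because it does not own its storage. The following is a minimal standalone sketch of that idea, not ART's actual classes — `BitVectorView` and `CreateFixedSize` here are simplified stand-ins for the real types in `base/bit_vector.h`, and the backing words are deliberately leaked to mimic arena lifetime:

```cpp
// Minimal standalone model (not ART's real classes): a trivially copyable
// bit-vector view whose empty state replaces std::optional, plus a
// CreateFixedSize-style factory.
#include <cassert>
#include <cstddef>

class BitVectorView {
 public:
  BitVectorView() = default;
  BitVectorView(size_t* storage, size_t size_in_bits)
      : storage_(storage), size_in_bits_(size_in_bits) {}

  size_t SizeInBits() const { return size_in_bits_; }

  void SetBit(size_t index) {
    assert(index < size_in_bits_);
    storage_[index / kBitsPerWord] |= size_t{1} << (index % kBitsPerWord);
  }

  bool IsBitSet(size_t index) const {
    assert(index < size_in_bits_);
    return (storage_[index / kBitsPerWord] >> (index % kBitsPerWord)) & 1u;
  }

 private:
  static constexpr size_t kBitsPerWord = sizeof(size_t) * 8;
  size_t* storage_ = nullptr;
  size_t size_in_bits_ = 0u;
};

// Stands in for ArenaBitVector::CreateFixedSize(allocator, bits, kind).
BitVectorView CreateFixedSize(size_t size_in_bits) {
  size_t num_words = (size_in_bits + sizeof(size_t) * 8 - 1) / (sizeof(size_t) * 8);
  // Arena-style allocation: zero-initialized, never individually freed.
  return BitVectorView(new size_t[num_words](), size_in_bits);
}

int main() {
  BitVectorView visited;  // Default-constructed: plays the role of std::nullopt.
  if (visited.SizeInBits() == 0u) {
    visited = CreateFixedSize(/*size_in_bits=*/128);  // Lazy initialization.
  }
  visited.SetBit(42);
  assert(visited.IsBitSet(42));
  assert(!visited.IsBitSet(41));
  return 0;
}
```

The fixed-size view trades expandability for a smaller footprint and no optional-engaged checks; the diff compensates with `DCHECK`s that the view's size still matches the graph's instruction or block count.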
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 5c2e4dbc51..d63b0abcc7 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -645,7 +645,7 @@ void CodeGenerator::CreateUnresolvedFieldLocationSummary(
   bool is_get = field_access->IsUnresolvedInstanceFieldGet() ||
                 field_access->IsUnresolvedStaticFieldGet();
 
-  ArenaAllocator* allocator = field_access->GetBlock()->GetGraph()->GetAllocator();
+  ArenaAllocator* allocator = GetGraph()->GetAllocator();
   LocationSummary* locations =
       new (allocator) LocationSummary(field_access, LocationSummary::kCallOnMainOnly);
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 4566cdf0ca..de13814eaf 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -186,11 +186,12 @@ class JumpTableARM64 : public DeletableArenaObject<kArenaAllocSwitchTable> {
  public:
   using VIXLInt32Literal = vixl::aarch64::Literal<int32_t>;
 
-  explicit JumpTableARM64(HPackedSwitch* switch_instr)
+  JumpTableARM64(HPackedSwitch* switch_instr, ArenaAllocator* allocator)
       : switch_instr_(switch_instr),
         table_start_(),
-        jump_targets_(switch_instr->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) {
+        jump_targets_(allocator->Adapter(kArenaAllocCodeGenerator)) {
     uint32_t num_entries = switch_instr_->GetNumEntries();
+    jump_targets_.reserve(num_entries);
     for (uint32_t i = 0; i < num_entries; i++) {
       VIXLInt32Literal* lit = new VIXLInt32Literal(0);
       jump_targets_.emplace_back(lit);
@@ -765,7 +766,8 @@ class CodeGeneratorARM64 : public CodeGenerator {
   uint32_t GetPreferredSlotsAlignment() const override { return vixl::aarch64::kXRegSizeInBytes; }
 
   JumpTableARM64* CreateJumpTable(HPackedSwitch* switch_instr) {
-    jump_tables_.emplace_back(new (GetGraph()->GetAllocator()) JumpTableARM64(switch_instr));
+    ArenaAllocator* allocator = GetGraph()->GetAllocator();
+    jump_tables_.emplace_back(new (allocator) JumpTableARM64(switch_instr, allocator));
     return jump_tables_.back().get();
   }
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index 2e20591c98..bbc519fcf9 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -204,11 +204,12 @@ ALWAYS_INLINE inline StoreOperandType GetStoreOperandType(DataType::Type type) {
 class JumpTableARMVIXL : public DeletableArenaObject<kArenaAllocSwitchTable> {
  public:
-  explicit JumpTableARMVIXL(HPackedSwitch* switch_instr)
+  JumpTableARMVIXL(HPackedSwitch* switch_instr, ArenaAllocator* allocator)
       : switch_instr_(switch_instr),
         table_start_(),
-        bb_addresses_(switch_instr->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) {
+        bb_addresses_(allocator->Adapter(kArenaAllocCodeGenerator)) {
     uint32_t num_entries = switch_instr_->GetNumEntries();
+    bb_addresses_.reserve(num_entries);
     for (uint32_t i = 0; i < num_entries; i++) {
       VIXLInt32Literal *lit = new VIXLInt32Literal(0, vixl32::RawLiteral::kManuallyPlaced);
       bb_addresses_.emplace_back(lit);
@@ -883,7 +884,8 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
   void GenerateExplicitNullCheck(HNullCheck* instruction) override;
 
   JumpTableARMVIXL* CreateJumpTable(HPackedSwitch* switch_instr) {
-    jump_tables_.emplace_back(new (GetGraph()->GetAllocator()) JumpTableARMVIXL(switch_instr));
+    ArenaAllocator* allocator = GetGraph()->GetAllocator();
+    jump_tables_.emplace_back(new (allocator) JumpTableARMVIXL(switch_instr, allocator));
     return jump_tables_.back().get();
   }
 
   void EmitJumpTables();
diff --git a/compiler/optimizing/constructor_fence_redundancy_elimination.cc b/compiler/optimizing/constructor_fence_redundancy_elimination.cc
index 71fc39a956..c89ec171d9 100644
--- a/compiler/optimizing/constructor_fence_redundancy_elimination.cc
+++ b/compiler/optimizing/constructor_fence_redundancy_elimination.cc
@@ -33,7 +33,7 @@ class CFREVisitor final : public HGraphVisitor {
       : HGraphVisitor(graph),
         scoped_allocator_(graph->GetArenaStack()),
         candidate_fences_(scoped_allocator_.Adapter(kArenaAllocCFRE)),
-        candidate_fence_targets_(std::nullopt),
+        candidate_fence_targets_(),
         stats_(stats) {}
 
   void VisitBasicBlock(HBasicBlock* block) override {
@@ -48,14 +48,17 @@ class CFREVisitor final : public HGraphVisitor {
   void VisitConstructorFence(HConstructorFence* constructor_fence) override {
     candidate_fences_.push_back(constructor_fence);
 
-    if (!candidate_fence_targets_.has_value()) {
+    if (candidate_fence_targets_.SizeInBits() == 0u) {
       size_t number_of_instructions = GetGraph()->GetCurrentInstructionId();
-      candidate_fence_targets_.emplace(
-          &scoped_allocator_, number_of_instructions, /*expandable=*/ false, kArenaAllocCFRE);
+      candidate_fence_targets_ = ArenaBitVector::CreateFixedSize(
+          &scoped_allocator_, number_of_instructions, kArenaAllocCFRE);
+    } else {
+      DCHECK_EQ(candidate_fence_targets_.SizeInBits(),
+                static_cast<size_t>(GetGraph()->GetCurrentInstructionId()));
     }
 
     for (HInstruction* input : constructor_fence->GetInputs()) {
-      candidate_fence_targets_->SetBit(input->GetId());
+      candidate_fence_targets_.SetBit(input->GetId());
     }
   }
@@ -162,8 +165,7 @@ class CFREVisitor final : public HGraphVisitor {
   void VisitSetLocation([[maybe_unused]] HInstruction* inst, HInstruction* store_input) {
     if (candidate_fences_.empty()) {
       // There is no need to look at inputs if there are no candidate fence targets.
-      DCHECK_IMPLIES(candidate_fence_targets_.has_value(),
-                     !candidate_fence_targets_->IsAnyBitSet());
+      DCHECK(!candidate_fence_targets_.IsAnyBitSet());
       return;
     }
 
     // An object is considered "published" if it's stored onto the heap.
@@ -179,8 +181,7 @@ class CFREVisitor final : public HGraphVisitor {
   bool HasInterestingPublishTargetAsInput(HInstruction* inst) {
     if (candidate_fences_.empty()) {
       // There is no need to look at inputs if there are no candidate fence targets.
-      DCHECK_IMPLIES(candidate_fence_targets_.has_value(),
-                     !candidate_fence_targets_->IsAnyBitSet());
+      DCHECK(!candidate_fence_targets_.IsAnyBitSet());
       return false;
     }
     for (HInstruction* input : inst->GetInputs()) {
@@ -221,15 +222,17 @@ class CFREVisitor final : public HGraphVisitor {
     // there is no benefit to this extra complexity unless we also reordered
     // the stores to come later.
     candidate_fences_.clear();
-    DCHECK(candidate_fence_targets_.has_value());
-    candidate_fence_targets_->ClearAllBits();
+    DCHECK_EQ(candidate_fence_targets_.SizeInBits(),
+              static_cast<size_t>(GetGraph()->GetCurrentInstructionId()));
+    candidate_fence_targets_.ClearAllBits();
   }
 
   // A publishing 'store' is only interesting if the value being stored
   // is one of the fence `targets` in `candidate_fences`.
   bool IsInterestingPublishTarget(HInstruction* store_input) const {
-    DCHECK(candidate_fence_targets_.has_value());
-    return candidate_fence_targets_->IsBitSet(store_input->GetId());
+    DCHECK_EQ(candidate_fence_targets_.SizeInBits(),
+              static_cast<size_t>(GetGraph()->GetCurrentInstructionId()));
+    return candidate_fence_targets_.IsBitSet(store_input->GetId());
   }
 
   // Phase-local heap memory allocator for CFRE optimizer.
@@ -245,7 +248,7 @@ class CFREVisitor final : public HGraphVisitor {
 
   // Stores a set of the fence targets, to allow faster lookup of whether
   // a detected publish is a target of one of the candidate fences.
-  std::optional<ArenaBitVector> candidate_fence_targets_;
+  BitVectorView<size_t> candidate_fence_targets_;
 
   // Used to record stats about the optimization.
   OptimizingCompilerStats* const stats_;
diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc
index 9955982309..c367a20a06 100644
--- a/compiler/optimizing/dead_code_elimination.cc
+++ b/compiler/optimizing/dead_code_elimination.cc
@@ -201,7 +201,7 @@ static bool RemoveNonNullControlDependences(HBasicBlock* block, HBasicBlock* thr
         user_block != throws &&
         block->Dominates(user_block)) {
       if (bound == nullptr) {
-        bound = new (obj->GetBlock()->GetGraph()->GetAllocator()) HBoundType(obj);
+        bound = new (block->GetGraph()->GetAllocator()) HBoundType(obj);
         bound->SetUpperBound(ti, /*can_be_null*/ false);
         bound->SetReferenceTypeInfo(ti);
         bound->SetCanBeNull(false);
@@ -591,14 +591,15 @@ void HDeadCodeElimination::ConnectSuccessiveBlocks() {
 
 struct HDeadCodeElimination::TryBelongingInformation {
   TryBelongingInformation(HGraph* graph, ScopedArenaAllocator* allocator)
-      : blocks_in_try(allocator, graph->GetBlocks().size(), /*expandable=*/false, kArenaAllocDCE),
-        coalesced_try_entries(
-            allocator, graph->GetBlocks().size(), /*expandable=*/false, kArenaAllocDCE) {}
+      : blocks_in_try(ArenaBitVector::CreateFixedSize(
+            allocator, graph->GetBlocks().size(), kArenaAllocDCE)),
+        coalesced_try_entries(ArenaBitVector::CreateFixedSize(
+            allocator, graph->GetBlocks().size(), kArenaAllocDCE)) {}
 
   // Which blocks belong in the try.
-  ArenaBitVector blocks_in_try;
+  BitVectorView<size_t> blocks_in_try;
 
   // Which other try entries are referencing this same try.
-  ArenaBitVector coalesced_try_entries;
+  BitVectorView<size_t> coalesced_try_entries;
 };
 
 bool HDeadCodeElimination::CanPerformTryRemoval(const TryBelongingInformation& try_belonging_info) {
@@ -725,7 +726,7 @@ bool HDeadCodeElimination::RemoveUnneededTries() {
       if (try_boundary->HasSameExceptionHandlersAs(*other_try_boundary)) {
         // Merge the entries as they are really the same one.
         // Block merging.
-        it->second.blocks_in_try.Union(&other_it->second.blocks_in_try);
+        it->second.blocks_in_try.Union(other_it->second.blocks_in_try);
 
         // Add the coalesced try entry to update it too.
         it->second.coalesced_try_entries.SetBit(other_block->GetBlockId());
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 26efefa2d8..4ef0fc907a 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -301,7 +301,7 @@ bool InstructionSimplifierVisitor::TryCombineVecMultiplyAccumulate(HVecMul* mul)
     return false;
   }
 
-  ArenaAllocator* allocator = mul->GetBlock()->GetGraph()->GetAllocator();
+  ArenaAllocator* allocator = GetGraph()->GetAllocator();
 
   if (!mul->HasOnlyOneNonEnvironmentUse()) {
     return false;
   }
@@ -3637,8 +3637,8 @@ bool InstructionSimplifierVisitor::TrySubtractionChainSimplification(
   bool is_x_negated = is_y_negated ^ ((x == right) && y->IsSub());
   int64_t const3_val = ComputeAddition(type, const1_val, const2_val);
   HBasicBlock* block = instruction->GetBlock();
-  HConstant* const3 = block->GetGraph()->GetConstant(type, const3_val);
-  ArenaAllocator* allocator = instruction->GetAllocator();
+  HConstant* const3 = GetGraph()->GetConstant(type, const3_val);
+  ArenaAllocator* allocator = GetGraph()->GetAllocator();
   HInstruction* z;
 
   if (is_x_negated) {
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 5323ae2445..edd454c93e 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -198,7 +198,7 @@ void IntrinsicVisitor::CreateReferenceRefersToLocations(HInvoke* invoke, CodeGen
     return;
   }
 
-  ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
+  ArenaAllocator* allocator = codegen->GetGraph()->GetAllocator();
   LocationSummary* locations =
       new (allocator) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
   locations->SetInAt(0, Location::RequiresRegister());
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 98aa5600b4..3eaaa6cb94 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -2961,9 +2961,8 @@ void IntrinsicLocationsBuilderARM64::VisitSystemArrayCopyChar(HInvoke* invoke) {
     }
   }
 
-  ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
   LocationSummary* locations =
-      new (allocator) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
+      new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
   // arraycopy(char[] src, int src_pos, char[] dst, int dst_pos, int length).
   locations->SetInAt(0, Location::RequiresRegister());
   locations->SetInAt(1, LocationForSystemArrayCopyInput(invoke->InputAt(1)));
@@ -4925,7 +4924,7 @@ static LocationSummary* CreateVarHandleCommonLocations(HInvoke* invoke,
   size_t expected_coordinates_count = GetExpectedVarHandleCoordinatesCount(invoke);
   DataType::Type return_type = invoke->GetType();
 
-  ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
+  ArenaAllocator* allocator = codegen->GetGraph()->GetAllocator();
   LocationSummary* locations =
       new (allocator) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
   locations->SetInAt(0, Location::RequiresRegister());
@@ -5976,8 +5975,7 @@ void VarHandleSlowPathARM64::EmitByteArrayViewCode(CodeGenerator* codegen_in) {
 }
 
 void IntrinsicLocationsBuilderARM64::VisitMethodHandleInvokeExact(HInvoke* invoke) {
-  ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
-  LocationSummary* locations = new (allocator)
+  LocationSummary* locations = new (allocator_)
       LocationSummary(invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
 
   InvokeDexCallingConventionVisitorARM64 calling_convention;
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index a6f6eb0ba0..9e60090a03 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -2690,7 +2690,7 @@ static void CreateUnsafeGetLocations(HInvoke* invoke,
                                      DataType::Type type,
                                      bool atomic) {
   bool can_call = codegen->EmitReadBarrier() && IsUnsafeGetReference(invoke);
-  ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
+  ArenaAllocator* allocator = codegen->GetGraph()->GetAllocator();
   LocationSummary* locations =
       new (allocator) LocationSummary(invoke,
                                       can_call
@@ -3101,7 +3101,7 @@ static void CreateUnsafePutLocations(HInvoke* invoke,
                                      CodeGeneratorARMVIXL* codegen,
                                      DataType::Type type,
                                      bool atomic) {
-  ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
+  ArenaAllocator* allocator = codegen->GetGraph()->GetAllocator();
   LocationSummary* locations =
       new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
   locations->SetInAt(0, Location::NoLocation());  // Unused receiver.
@@ -3115,7 +3115,7 @@ static void CreateUnsafePutAbsoluteLocations(HInvoke* invoke,
                                              CodeGeneratorARMVIXL* codegen,
                                              DataType::Type type,
                                              bool atomic) {
-  ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
+  ArenaAllocator* allocator = codegen->GetGraph()->GetAllocator();
   LocationSummary* locations =
       new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
   locations->SetInAt(0, Location::NoLocation());  // Unused receiver.
@@ -3752,7 +3752,7 @@ class ReadBarrierCasSlowPathARMVIXL : public SlowPathCodeARMVIXL {
 
 static void CreateUnsafeCASLocations(HInvoke* invoke, CodeGeneratorARMVIXL* codegen) {
   const bool can_call = codegen->EmitReadBarrier() && IsUnsafeCASReference(invoke);
-  ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
+  ArenaAllocator* allocator = codegen->GetGraph()->GetAllocator();
   LocationSummary* locations =
       new (allocator) LocationSummary(invoke,
                                       can_call
@@ -4046,7 +4046,7 @@ static void CreateUnsafeGetAndUpdateLocations(HInvoke* invoke,
                                               DataType::Type type,
                                               GetAndUpdateOp get_and_update_op) {
   const bool can_call = codegen->EmitReadBarrier() && IsUnsafeGetAndSetReference(invoke);
-  ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
+  ArenaAllocator* allocator = codegen->GetGraph()->GetAllocator();
   LocationSummary* locations =
       new (allocator) LocationSummary(invoke,
                                       can_call
@@ -4653,7 +4653,7 @@ static LocationSummary* CreateVarHandleCommonLocations(HInvoke* invoke,
   size_t expected_coordinates_count = GetExpectedVarHandleCoordinatesCount(invoke);
   DataType::Type return_type = invoke->GetType();
 
-  ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
+  ArenaAllocator* allocator = codegen->GetGraph()->GetAllocator();
   LocationSummary* locations =
       new (allocator) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
   locations->SetInAt(0, Location::RequiresRegister());
diff --git a/compiler/optimizing/intrinsics_riscv64.cc b/compiler/optimizing/intrinsics_riscv64.cc
index cc0f114c56..4c56800920 100644
--- a/compiler/optimizing/intrinsics_riscv64.cc
+++ b/compiler/optimizing/intrinsics_riscv64.cc
@@ -3858,7 +3858,7 @@ static LocationSummary* CreateVarHandleCommonLocations(HInvoke* invoke,
   size_t expected_coordinates_count = GetExpectedVarHandleCoordinatesCount(invoke);
   DataType::Type return_type = invoke->GetType();
 
-  ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
+  ArenaAllocator* allocator = codegen->GetGraph()->GetAllocator();
   LocationSummary* locations =
       new (allocator) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
   locations->SetInAt(0, Location::RequiresRegister());
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 5a6b8832c4..5710ce42bb 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -4117,7 +4117,7 @@ static void CreateVarHandleGetLocations(HInvoke* invoke, CodeGeneratorX86* codeg
     return;
   }
 
-  ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
+  ArenaAllocator* allocator = codegen->GetGraph()->GetAllocator();
   LocationSummary* locations = new (allocator) LocationSummary(
       invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
   locations->SetInAt(0, Location::RequiresRegister());
@@ -4253,7 +4253,7 @@ static void CreateVarHandleSetLocations(HInvoke* invoke, CodeGeneratorX86* codeg
     return;
   }
 
-  ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
+  ArenaAllocator* allocator = codegen->GetGraph()->GetAllocator();
   LocationSummary* locations = new (allocator) LocationSummary(
       invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
   locations->SetInAt(0, Location::RequiresRegister());
@@ -4430,7 +4430,7 @@ static void CreateVarHandleGetAndSetLocations(HInvoke* invoke, CodeGeneratorX86*
     return;
   }
 
-  ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
+  ArenaAllocator* allocator = codegen->GetGraph()->GetAllocator();
   LocationSummary* locations = new (allocator) LocationSummary(
       invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
   locations->AddRegisterTemps(2);
@@ -4630,7 +4630,7 @@ static void CreateVarHandleCompareAndSetOrExchangeLocations(HInvoke* invoke,
     return;
   }
 
-  ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
+  ArenaAllocator* allocator = codegen->GetGraph()->GetAllocator();
   LocationSummary* locations = new (allocator) LocationSummary(
       invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
   locations->AddRegisterTemps(2);
@@ -4810,7 +4810,7 @@ static void CreateVarHandleGetAndAddLocations(HInvoke* invoke, CodeGeneratorX86*
     return;
   }
 
-  ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
+  ArenaAllocator* allocator = codegen->GetGraph()->GetAllocator();
   LocationSummary* locations = new (allocator) LocationSummary(
       invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
   locations->AddRegisterTemps(2);
@@ -4985,7 +4985,7 @@ static void CreateVarHandleGetAndBitwiseOpLocations(HInvoke* invoke, CodeGenerat
     return;
   }
 
-  ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
+  ArenaAllocator* allocator = codegen->GetGraph()->GetAllocator();
   LocationSummary* locations = new (allocator) LocationSummary(
       invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
   // We need a byte register temp to store the result of the bitwise operation
diff --git a/compiler/optimizing/locations.cc b/compiler/optimizing/locations.cc
index 4189bc4053..f419263f62 100644
--- a/compiler/optimizing/locations.cc
+++ b/compiler/optimizing/locations.cc
@@ -26,17 +26,24 @@ namespace art HIDDEN {
 // Verify that Location is trivially copyable.
 static_assert(std::is_trivially_copyable<Location>::value, "Location should be trivially copyable");
 
+static inline ArrayRef<Location> AllocateInputLocations(HInstruction* instruction,
+                                                        ArenaAllocator* allocator) {
+  size_t input_count = instruction->InputCount();
+  Location* array = allocator->AllocArray<Location>(input_count, kArenaAllocLocationSummary);
+  return {array, input_count};
+}
+
 LocationSummary::LocationSummary(HInstruction* instruction,
                                  CallKind call_kind,
                                  bool intrinsified,
                                  ArenaAllocator* allocator)
-    : inputs_(instruction->InputCount(), allocator->Adapter(kArenaAllocLocationSummary)),
+    : inputs_(AllocateInputLocations(instruction, allocator)),
       temps_(allocator->Adapter(kArenaAllocLocationSummary)),
+      stack_mask_(nullptr),
       call_kind_(call_kind),
       intrinsified_(intrinsified),
       has_custom_slow_path_calling_convention_(false),
       output_overlaps_(Location::kOutputOverlap),
-      stack_mask_(nullptr),
       register_mask_(0),
       live_registers_(RegisterSet::Empty()),
       custom_slow_path_caller_saves_(RegisterSet::Empty()) {
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index 2209f05c0b..b8fe29c621 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -19,6 +19,7 @@
 
 #include "base/arena_containers.h"
 #include "base/arena_object.h"
+#include "base/array_ref.h"
 #include "base/bit_field.h"
 #include "base/bit_utils.h"
 #include "base/bit_vector.h"
@@ -39,7 +40,7 @@ std::ostream& operator<<(std::ostream& os, const Location& location);
  */
 class Location : public ValueObject {
  public:
-  enum OutputOverlap {
+  enum OutputOverlap : uint8_t {
     // The liveness of the output overlaps the liveness of one or
     // several input(s); the register allocator cannot reuse an
     // input's location for the output's location.
@@ -534,7 +535,7 @@ static constexpr bool kIntrinsified = true;
  */
 class LocationSummary : public ArenaObject<kArenaAllocLocationSummary> {
  public:
-  enum CallKind {
+  enum CallKind : uint8_t {
     kNoCall,
     kCallOnMainAndSlowPath,
     kCallOnSlowPath,
@@ -713,8 +714,13 @@ class LocationSummary : public ArenaObject<kArenaAllocLocationSummary> {
                   bool intrinsified,
                   ArenaAllocator* allocator);
 
-  ArenaVector<Location> inputs_;
+  ArrayRef<Location> inputs_;
   ArenaVector<Location> temps_;
+  Location output_;
+
+  // Mask of objects that live in the stack.
+  BitVector* stack_mask_;
+
   const CallKind call_kind_;
 
   // Whether these are locations for an intrinsified call.
   const bool intrinsified_;
@@ -723,10 +729,6 @@ class LocationSummary : public ArenaObject<kArenaAllocLocationSummary> {
   // Whether the output overlaps with any of the inputs. If it overlaps, then it cannot
   // share the same register as the inputs.
   Location::OutputOverlap output_overlaps_;
-  Location output_;
-
-  // Mask of objects that live in the stack.
-  BitVector* stack_mask_;
 
   // Mask of objects that live in register.
   uint32_t register_mask_;
@@ -734,7 +736,8 @@ class LocationSummary : public ArenaObject<kArenaAllocLocationSummary> {
   // Registers that are in use at this position.
   RegisterSet live_registers_;
 
-  // Custom slow path caller saves. Valid only if indicated by slow_path_calling_convention_.
+  // Custom slow path caller saves. Valid only if indicated by
+  // `has_custom_slow_path_calling_convention_`.
   RegisterSet custom_slow_path_caller_saves_;
 
   ART_FRIEND_TEST(RegisterAllocatorTest, ExpectedInRegisterHint);
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 752e8b10d1..6b74e7246e 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -1457,18 +1457,17 @@ void HInstruction::ReplaceUsesDominatedBy(HInstruction* dominator,
                                           HInstruction* replacement,
                                           bool strictly_dominated) {
   HBasicBlock* dominator_block = dominator->GetBlock();
-  std::optional<ArenaBitVector> visited_blocks;
+  BitVectorView<size_t> visited_blocks;
 
   // Lazily compute the dominated blocks to faster calculation of domination afterwards.
   auto maybe_generate_visited_blocks = [&visited_blocks, this, dominator_block]() {
-    if (visited_blocks.has_value()) {
+    if (visited_blocks.SizeInBits() != 0u) {
+      DCHECK_EQ(visited_blocks.SizeInBits(), GetBlock()->GetGraph()->GetBlocks().size());
       return;
     }
     HGraph* graph = GetBlock()->GetGraph();
-    visited_blocks.emplace(graph->GetAllocator(),
-                           graph->GetBlocks().size(),
-                           /* expandable= */ false,
-                           kArenaAllocMisc);
+    visited_blocks = ArenaBitVector::CreateFixedSize(
+        graph->GetAllocator(), graph->GetBlocks().size(), kArenaAllocMisc);
     ScopedArenaAllocator allocator(graph->GetArenaStack());
     ScopedArenaQueue<const HBasicBlock*> worklist(allocator.Adapter(kArenaAllocMisc));
     worklist.push(dominator_block);
@@ -1476,9 +1475,9 @@ void HInstruction::ReplaceUsesDominatedBy(HInstruction* dominator,
     while (!worklist.empty()) {
       const HBasicBlock* current = worklist.front();
       worklist.pop();
-      visited_blocks->SetBit(current->GetBlockId());
+      visited_blocks.SetBit(current->GetBlockId());
       for (HBasicBlock* dominated : current->GetDominatedBlocks()) {
-        if (visited_blocks->IsBitSet(dominated->GetBlockId())) {
+        if (visited_blocks.IsBitSet(dominated->GetBlockId())) {
           continue;
         }
         worklist.push(dominated);
@@ -1501,7 +1500,7 @@ void HInstruction::ReplaceUsesDominatedBy(HInstruction* dominator,
       } else {
         // Block domination.
         maybe_generate_visited_blocks();
-        dominated = visited_blocks->IsBitSet(block->GetBlockId());
+        dominated = visited_blocks.IsBitSet(block->GetBlockId());
       }
 
       if (dominated) {
@@ -1512,7 +1511,7 @@ void HInstruction::ReplaceUsesDominatedBy(HInstruction* dominator,
         // for their inputs.
         HBasicBlock* predecessor = block->GetPredecessors()[index];
         maybe_generate_visited_blocks();
-        if (visited_blocks->IsBitSet(predecessor->GetBlockId())) {
+        if (visited_blocks.IsBitSet(predecessor->GetBlockId())) {
           user->ReplaceInput(replacement, index);
         }
       }
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 772828d9ef..bcf27ae9fa 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -2113,7 +2113,6 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
   HInstruction* GetPreviousDisregardingMoves() const;
 
   HBasicBlock* GetBlock() const { return block_; }
-  ArenaAllocator* GetAllocator() const { return block_->GetGraph()->GetAllocator(); }
   void SetBlock(HBasicBlock* block) { block_ = block; }
   bool IsInBlock() const { return block_ != nullptr; }
   bool IsInLoop() const { return block_->IsInLoop(); }
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 9867e11f35..0417f04c12 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -218,7 +218,7 @@ static void BoundTypeIn(HInstruction* receiver,
         : start_block->GetFirstInstruction();
     if (ShouldCreateBoundType(
           insert_point, receiver, class_rti, start_instruction, start_block)) {
-      bound_type = new (receiver->GetBlock()->GetGraph()->GetAllocator()) HBoundType(receiver);
+      bound_type = new (start_block->GetGraph()->GetAllocator()) HBoundType(receiver);
       bound_type->SetUpperBound(class_rti, /* can_be_null= */ false);
       start_block->InsertInstructionBefore(bound_type, insert_point);
       // To comply with the RTP algorithm, don't type the bound type just yet, it will
diff --git a/compiler/optimizing/ssa_phi_elimination.cc b/compiler/optimizing/ssa_phi_elimination.cc
index 0796acc687..b2a3846dc4 100644
--- a/compiler/optimizing/ssa_phi_elimination.cc
+++ b/compiler/optimizing/ssa_phi_elimination.cc
@@ -139,10 +139,8 @@ bool SsaRedundantPhiElimination::Run() {
     }
   }
 
-  ArenaBitVector visited_phis_in_cycle(&allocator,
-                                       graph_->GetCurrentInstructionId(),
-                                       /* expandable= */ false,
-                                       kArenaAllocSsaPhiElimination);
+  BitVectorView<size_t> visited_phis_in_cycle = ArenaBitVector::CreateFixedSize(
+      &allocator, graph_->GetCurrentInstructionId(), kArenaAllocSsaPhiElimination);
   ScopedArenaVector<HPhi*> cycle_worklist(allocator.Adapter(kArenaAllocSsaPhiElimination));
 
   while (!worklist.empty()) {
diff --git a/libdexfile/dex/code_item_accessors_test.cc b/libdexfile/dex/code_item_accessors_test.cc
index a815d06cb8..9f20fc2495 100644
--- a/libdexfile/dex/code_item_accessors_test.cc
+++ b/libdexfile/dex/code_item_accessors_test.cc
@@ -61,10 +61,6 @@ TEST(CodeItemAccessorsTest, TestDexInstructionsAccessor) {
   std::unique_ptr<const DexFile> standard_dex(CreateFakeDex(/*compact_dex=*/false,
                                                             &standard_dex_data));
   ASSERT_TRUE(standard_dex != nullptr);
-  std::vector<uint8_t> compact_dex_data;
-  std::unique_ptr<const DexFile> compact_dex(CreateFakeDex(/*compact_dex=*/true,
-                                                           &compact_dex_data));
-  ASSERT_TRUE(compact_dex != nullptr);
   static constexpr uint16_t kRegisterSize = 2;
   static constexpr uint16_t kInsSize = 1;
   static constexpr uint16_t kOutsSize = 3;
@@ -98,19 +94,6 @@ TEST(CodeItemAccessorsTest, TestDexInstructionsAccessor) {
   dex_code_item->tries_size_ = kTriesSize;
   dex_code_item->insns_size_in_code_units_ = kInsnsSizeInCodeUnits;
   verify_code_item(standard_dex.get(), dex_code_item, dex_code_item->insns_);
-
-  CompactDexFile::CodeItem* cdex_code_item =
-      reinterpret_cast<CompactDexFile::CodeItem*>(const_cast<uint8_t*>(compact_dex->Begin() +
-          CompactDexFile::CodeItem::kMaxPreHeaderSize * sizeof(uint16_t)));
-  std::vector<uint16_t> preheader;
-  cdex_code_item->Create(kRegisterSize,
-                         kInsSize,
-                         kOutsSize,
-                         kTriesSize,
-                         kInsnsSizeInCodeUnits,
-                         cdex_code_item->GetPreHeader());
-
-  verify_code_item(compact_dex.get(), cdex_code_item, cdex_code_item->insns_);
 }
 
 }  // namespace art
diff --git a/libdexfile/dex/compact_dex_file_test.cc b/libdexfile/dex/compact_dex_file_test.cc
index 799967e255..345e66b1d5 100644
--- a/libdexfile/dex/compact_dex_file_test.cc
+++ b/libdexfile/dex/compact_dex_file_test.cc
@@ -38,8 +38,8 @@ TEST(CompactDexFileTest, MagicAndVersion) {
     }
     EXPECT_EQ(valid_magic, CompactDexFile::IsMagicValid(header));
     EXPECT_EQ(valid_version, CompactDexFile::IsVersionValid(header));
-    EXPECT_EQ(valid_magic, DexFileLoader::IsMagicValid(header));
-    EXPECT_EQ(valid_magic && valid_version, DexFileLoader::IsVersionAndMagicValid(header));
+    EXPECT_FALSE(DexFileLoader::IsMagicValid(header));
+    EXPECT_FALSE(DexFileLoader::IsVersionAndMagicValid(header));
   }
 }
 
diff --git a/libdexfile/dex/dex_file_loader.cc b/libdexfile/dex/dex_file_loader.cc
index e92a5ac813..df9c9c11cb 100644
--- a/libdexfile/dex/dex_file_loader.cc
+++ b/libdexfile/dex/dex_file_loader.cc
@@ -131,18 +131,11 @@ bool DexFileLoader::IsMagicValid(uint32_t magic) {
 }
 
 bool DexFileLoader::IsMagicValid(const uint8_t* magic) {
-  return StandardDexFile::IsMagicValid(magic) ||
-      CompactDexFile::IsMagicValid(magic);
+  return StandardDexFile::IsMagicValid(magic);
 }
 
 bool DexFileLoader::IsVersionAndMagicValid(const uint8_t* magic) {
-  if (StandardDexFile::IsMagicValid(magic)) {
-    return StandardDexFile::IsVersionValid(magic);
-  }
-  if (CompactDexFile::IsMagicValid(magic)) {
-    return CompactDexFile::IsVersionValid(magic);
-  }
-  return false;
+  return StandardDexFile::IsMagicValid(magic) && StandardDexFile::IsVersionValid(magic);
 }
 
 bool DexFileLoader::IsMultiDexLocation(std::string_view location) {
@@ -474,9 +467,6 @@ std::unique_ptr<DexFile> DexFileLoader::OpenCommon(std::shared_ptr<DexFileContai
   if (size >= sizeof(StandardDexFile::Header) && StandardDexFile::IsMagicValid(base)) {
     uint32_t checksum = location_checksum.value_or(header->checksum_);
     dex_file.reset(new StandardDexFile(base, location, checksum, oat_dex_file, container));
-  } else if (size >= sizeof(CompactDexFile::Header) && CompactDexFile::IsMagicValid(base)) {
-    uint32_t checksum = location_checksum.value_or(header->checksum_);
-    dex_file.reset(new CompactDexFile(base, location, checksum, oat_dex_file, container));
   } else {
     *error_msg = StringPrintf("Invalid or truncated dex file '%s'", location.c_str());
   }
@@ -489,8 +479,7 @@ std::unique_ptr<DexFile> DexFileLoader::OpenCommon(std::shared_ptr<DexFileContai
     dex_file.reset();
     return nullptr;
   }
-  // NB: Dex verifier does not understand the compact dex format.
-  if (verify && !dex_file->IsCompactDexFile()) {
+  if (verify) {
     DEXFILE_SCOPED_TRACE(std::string("Verify dex file ") + location);
     if (!dex::Verify(dex_file.get(), location.c_str(), verify_checksum, error_msg)) {
       if (error_code != nullptr) {
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index ab07ea0f02..d574f8e139 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -427,6 +427,7 @@ std::unique_ptr<RuntimeParser> ParsedOptions::MakeParser(bool ignore_unrecognize
           .WithValueMap(hiddenapi_policy_valuemap)
           .IntoKey(M::HiddenApiPolicy)
       .Define("-Xcore-platform-api-policy:_")
+          .WithHelp("Ignored for SDK level 36+.")
           .WithType<hiddenapi::EnforcementPolicy>()
           .WithValueMap(hiddenapi_policy_valuemap)
           .IntoKey(M::CorePlatformApiPolicy)
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 23e06ab792..6f2822fda0 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1725,13 +1725,37 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
   hidden_api_policy_ = runtime_options.GetOrDefault(Opt::HiddenApiPolicy);
   DCHECK_IMPLIES(is_zygote_, hidden_api_policy_ == hiddenapi::EnforcementPolicy::kDisabled);
 
-  // Set core platform API enforcement policy. The checks are disabled by default and
-  // can be enabled with a command line flag. AndroidRuntime will pass the flag if
-  // a system property is set.
-  core_platform_api_policy_ = runtime_options.GetOrDefault(Opt::CorePlatformApiPolicy);
-  if (core_platform_api_policy_ != hiddenapi::EnforcementPolicy::kDisabled) {
-    LOG(INFO) << "Core platform API reporting enabled, enforcing="
-        << (core_platform_api_policy_ == hiddenapi::EnforcementPolicy::kEnabled ? "true" : "false");
+  // Set core platform API enforcement policy. Always enabled if the platform
+  // SDK level is 36+, otherwise the checks are disabled by default and can be
+  // enabled with a command line flag. AndroidRuntime will pass the flag if a
+  // system property is set.
+  {
+    bool always_enable = false;
+#ifdef ART_TARGET_ANDROID
+    int device_sdk_version = android_get_device_api_level();
+    if (device_sdk_version >= 36) {
+      always_enable = true;
+    } else if (device_sdk_version == 35) {
+      std::string codename =
+          android::base::GetProperty("ro.build.version.codename", /*default_value=*/"");
+      always_enable = (codename == "Baklava");
+    }
+#endif
+    const char* reason;
+    if (always_enable) {
+      core_platform_api_policy_ = hiddenapi::EnforcementPolicy::kEnabled;
+      reason = "for Android 16+";
+    } else {
+      core_platform_api_policy_ = runtime_options.GetOrDefault(Opt::CorePlatformApiPolicy);
+      reason = "by runtime option";
+    }
+    if (core_platform_api_policy_ != hiddenapi::EnforcementPolicy::kDisabled) {
+      LOG(INFO) << "Core platform API "
+                << (core_platform_api_policy_ == hiddenapi::EnforcementPolicy::kEnabled
+                        ? "enforcement"
+                        : "reporting")
+                << " enabled " << reason;
+    }
   }
 
   // Dex2Oat's Runtime does not need the signal chain or the fault handler
diff --git a/runtime/vdex_file.cc b/runtime/vdex_file.cc
index 1d3a9fbe66..c8f1bfd810 100644
--- a/runtime/vdex_file.cc
+++ b/runtime/vdex_file.cc
@@ -204,6 +204,23 @@ std::unique_ptr<VdexFile> VdexFile::OpenFromDm(const std::string& filename,
   return vdex_file;
 }
 
+bool VdexFile::IsValid() const {
+  if (mmap_.Size() < sizeof(VdexFileHeader) || !GetVdexFileHeader().IsValid()) {
+    return false;
+  }
+
+  // Invalidate vdex files that contain dex files in the no longer supported
+  // compact dex format. Revert this whenever the vdex version is bumped.
+  size_t i = 0;
+  for (const uint8_t* dex_file_start = GetNextDexFileData(nullptr, i); dex_file_start != nullptr;
+       dex_file_start = GetNextDexFileData(dex_file_start, ++i)) {
+    if (!DexFileLoader::IsMagicValid(dex_file_start)) {
+      return false;
+    }
+  }
+  return true;
+}
+
 const uint8_t* VdexFile::GetNextDexFileData(const uint8_t* cursor, uint32_t dex_file_index) const {
   DCHECK(cursor == nullptr || (cursor > Begin() && cursor <= End()));
   if (cursor == nullptr) {
diff --git a/runtime/vdex_file.h b/runtime/vdex_file.h
index 4ccc402b32..a85c015202 100644
--- a/runtime/vdex_file.h
+++ b/runtime/vdex_file.h
@@ -124,6 +124,7 @@ class VdexFile {
   static constexpr uint8_t kVdexMagic[] = { 'v', 'd', 'e', 'x' };
 
   // The format version of the verifier deps header and the verifier deps.
+  // TODO: Revert the dex header checks in VdexFile::IsValid when this is bumped.
   // Last update: Introduce vdex sections.
   static constexpr uint8_t kVdexVersion[] = { '0', '2', '7', '\0' };
@@ -251,9 +252,7 @@ class VdexFile {
         GetSectionHeader(VdexSection::kVerifierDepsSection).section_size);
   }
 
-  bool IsValid() const {
-    return mmap_.Size() >= sizeof(VdexFileHeader) && GetVdexFileHeader().IsValid();
-  }
+  EXPORT bool IsValid() const;
 
   // This method is for iterating over the dex files in the vdex. If `cursor` is null,
   // the first dex file is returned. If `cursor` is not null, it must point to a dex
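Several hunks above allocate with arena placement-new (`new (allocator) LocationSummary(...)`, `new (allocator) JumpTableARM64(switch_instr, allocator)`). Below is a minimal standalone model of how that pattern works; the class names and the one-heap-block-per-`Alloc` allocator are illustrative stand-ins, not ART's actual implementation:

```cpp
// Standalone sketch of the `new (allocator) T(...)` arena pattern: an
// ArenaObject-style base class overloads operator new to draw storage from
// an arena, so compiler data structures are released wholesale with the
// arena instead of being deleted one by one.
#include <cstddef>
#include <cstdint>
#include <vector>

class ArenaAllocator {
 public:
  // Simplified: one heap block per request. ART's ArenaAllocator instead
  // bump-allocates within large pre-reserved chunks.
  void* Alloc(size_t bytes) {
    chunks_.emplace_back(bytes);
    return chunks_.back().data();
  }

 private:
  std::vector<std::vector<uint8_t>> chunks_;  // All storage dies with the arena.
};

// Plays the role of ART's ArenaObject<kArenaAlloc...> base class.
class ArenaObject {
 public:
  static void* operator new(size_t size, ArenaAllocator* allocator) {
    return allocator->Alloc(size);
  }
  // Matching placement delete, called only if a constructor throws.
  static void operator delete(void*, ArenaAllocator*) {}
};

// Hypothetical stand-in for a LocationSummary-like arena object.
class LocationSummaryLike : public ArenaObject {
 public:
  explicit LocationSummaryLike(size_t input_count) : input_count_(input_count) {}
  size_t input_count() const { return input_count_; }

 private:
  size_t input_count_;
};

int main() {
  ArenaAllocator arena;
  // Same shape as `new (allocator) LocationSummary(invoke, ...)` above.
  LocationSummaryLike* summary = new (&arena) LocationSummaryLike(/*input_count=*/2u);
  return summary->input_count() == 2u ? 0 : 1;
}  // Everything allocated from `arena` is reclaimed here, with no deletes.
```

This is also why the diff can freely switch which object supplies the allocator (`codegen->GetGraph()->GetAllocator()` versus the removed `HInstruction::GetAllocator()`): every path reaches the same per-graph arena, and the change merely shortens the pointer chase.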