Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/code_generator.cc               129
-rw-r--r--  compiler/optimizing/code_generator.h                  5
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc         106
-rw-r--r--  compiler/optimizing/code_generator_arm64.h            4
-rw-r--r--  compiler/optimizing/common_arm64.h                   26
-rw-r--r--  compiler/optimizing/gvn.cc                            5
-rw-r--r--  compiler/optimizing/instruction_simplifier.cc        42
-rw-r--r--  compiler/optimizing/nodes.cc                          8
-rw-r--r--  compiler/optimizing/nodes.h                           1
-rw-r--r--  compiler/optimizing/ssa_liveness_analysis.cc         13
-rw-r--r--  compiler/optimizing/ssa_liveness_analysis.h          22
-rw-r--r--  compiler/optimizing/ssa_liveness_analysis_test.cc    12
-rw-r--r--  compiler/optimizing/stack_map_stream.cc             103
-rw-r--r--  compiler/optimizing/stack_map_stream.h               71
-rw-r--r--  compiler/optimizing/stack_map_test.cc               138
15 files changed, 309 insertions(+), 376 deletions(-)
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index b3feb787a9..2589869859 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -1055,7 +1055,8 @@ void CodeGenerator::BuildStackMaps(MemoryRegion stack_map_region,
 
 void CodeGenerator::RecordPcInfo(HInstruction* instruction,
                                  uint32_t dex_pc,
-                                 SlowPathCode* slow_path) {
+                                 SlowPathCode* slow_path,
+                                 bool native_debug_info) {
   if (instruction != nullptr) {
     // The code generated for some type conversions
     // may call the runtime, thus normally requiring a subsequent
@@ -1120,12 +1121,23 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction,
     outer_dex_pc = outer_environment->GetDexPc();
     outer_environment_size = outer_environment->Size();
   }
+
+  HLoopInformation* info = instruction->GetBlock()->GetLoopInformation();
+  bool osr =
+      instruction->IsSuspendCheck() &&
+      (info != nullptr) &&
+      graph_->IsCompilingOsr() &&
+      (inlining_depth == 0);
+  StackMap::Kind kind = native_debug_info
+      ? StackMap::Kind::Debug
+      : (osr ? StackMap::Kind::OSR : StackMap::Kind::Default);
   stack_map_stream->BeginStackMapEntry(outer_dex_pc,
                                        native_pc,
                                        register_mask,
                                        locations->GetStackMask(),
                                        outer_environment_size,
-                                       inlining_depth);
+                                       inlining_depth,
+                                       kind);
   EmitEnvironment(environment, slow_path);
   // Record invoke info, the common case for the trampoline is super and static invokes. Only
   // record these to reduce oat file size.
@@ -1138,19 +1150,9 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction,
   }
   stack_map_stream->EndStackMapEntry();
 
-  HLoopInformation* info = instruction->GetBlock()->GetLoopInformation();
-  if (instruction->IsSuspendCheck() &&
-      (info != nullptr) &&
-      graph_->IsCompilingOsr() &&
-      (inlining_depth == 0)) {
+  if (osr) {
     DCHECK_EQ(info->GetSuspendCheck(), instruction);
-    // We duplicate the stack map as a marker that this stack map can be an OSR entry.
-    // Duplicating it avoids having the runtime recognize and skip an OSR stack map.
     DCHECK(info->IsIrreducible());
-    stack_map_stream->BeginStackMapEntry(
-        dex_pc, native_pc, register_mask, locations->GetStackMask(), outer_environment_size, 0);
-    EmitEnvironment(instruction->GetEnvironment(), slow_path);
-    stack_map_stream->EndStackMapEntry();
     if (kIsDebugBuild) {
       for (size_t i = 0, environment_size = environment->Size(); i < environment_size; ++i) {
         HInstruction* in_environment = environment->GetInstructionAt(i);
@@ -1167,14 +1169,6 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction,
         }
       }
     }
-  } else if (kIsDebugBuild) {
-    // Ensure stack maps are unique, by checking that the native pc in the stack map
-    // last emitted is different than the native pc of the stack map just emitted.
-    size_t number_of_stack_maps = stack_map_stream->GetNumberOfStackMaps();
-    if (number_of_stack_maps > 1) {
-      DCHECK_NE(stack_map_stream->GetStackMapNativePcOffset(number_of_stack_maps - 1),
-                stack_map_stream->GetStackMapNativePcOffset(number_of_stack_maps - 2));
-    }
   }
 }
 
@@ -1196,12 +1190,11 @@ void CodeGenerator::MaybeRecordNativeDebugInfo(HInstruction* instruction,
       // Ensure that we do not collide with the stack map of the previous instruction.
       GenerateNop();
     }
-    RecordPcInfo(instruction, dex_pc, slow_path);
+    RecordPcInfo(instruction, dex_pc, slow_path, /* native_debug_info */ true);
   }
 }
 
 void CodeGenerator::RecordCatchBlockInfo() {
-  ArenaAllocator* allocator = graph_->GetAllocator();
   StackMapStream* stack_map_stream = GetStackMapStream();
 
   for (HBasicBlock* block : *block_order_) {
@@ -1213,28 +1206,24 @@ void CodeGenerator::RecordCatchBlockInfo() {
     uint32_t num_vregs = graph_->GetNumberOfVRegs();
     uint32_t inlining_depth = 0;  // Inlining of catch blocks is not supported at the moment.
     uint32_t native_pc = GetAddressOf(block);
-    uint32_t register_mask = 0;  // Not used.
-
-    // The stack mask is not used, so we leave it empty.
-    ArenaBitVector* stack_mask =
-        ArenaBitVector::Create(allocator, 0, /* expandable */ true, kArenaAllocCodeGenerator);
 
     stack_map_stream->BeginStackMapEntry(dex_pc,
                                          native_pc,
-                                         register_mask,
-                                         stack_mask,
+                                         /* register_mask */ 0,
+                                         /* stack_mask */ nullptr,
                                          num_vregs,
-                                         inlining_depth);
+                                         inlining_depth,
+                                         StackMap::Kind::Catch);
 
     HInstruction* current_phi = block->GetFirstPhi();
     for (size_t vreg = 0; vreg < num_vregs; ++vreg) {
-        while (current_phi != nullptr && current_phi->AsPhi()->GetRegNumber() < vreg) {
-          HInstruction* next_phi = current_phi->GetNext();
-          DCHECK(next_phi == nullptr ||
-                 current_phi->AsPhi()->GetRegNumber() <= next_phi->AsPhi()->GetRegNumber())
-              << "Phis need to be sorted by vreg number to keep this a linear-time loop.";
-          current_phi = next_phi;
-        }
+      while (current_phi != nullptr && current_phi->AsPhi()->GetRegNumber() < vreg) {
+        HInstruction* next_phi = current_phi->GetNext();
+        DCHECK(next_phi == nullptr ||
+               current_phi->AsPhi()->GetRegNumber() <= next_phi->AsPhi()->GetRegNumber())
+            << "Phis need to be sorted by vreg number to keep this a linear-time loop.";
+        current_phi = next_phi;
+      }
 
       if (current_phi == nullptr || current_phi->AsPhi()->GetRegNumber() != vreg) {
         stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
@@ -1294,50 +1283,45 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo
       continue;
     }
 
+    using Kind = DexRegisterLocation::Kind;
     Location location = environment->GetLocationAt(i);
     switch (location.GetKind()) {
       case Location::kConstant: {
        DCHECK_EQ(current, location.GetConstant());
         if (current->IsLongConstant()) {
           int64_t value = current->AsLongConstant()->GetValue();
-          stack_map_stream->AddDexRegisterEntry(
-              DexRegisterLocation::Kind::kConstant, Low32Bits(value));
-          stack_map_stream->AddDexRegisterEntry(
-              DexRegisterLocation::Kind::kConstant, High32Bits(value));
+          stack_map_stream->AddDexRegisterEntry(Kind::kConstant, Low32Bits(value));
+          stack_map_stream->AddDexRegisterEntry(Kind::kConstant, High32Bits(value));
           ++i;
           DCHECK_LT(i, environment_size);
         } else if (current->IsDoubleConstant()) {
           int64_t value = bit_cast<int64_t, double>(current->AsDoubleConstant()->GetValue());
-          stack_map_stream->AddDexRegisterEntry(
-              DexRegisterLocation::Kind::kConstant, Low32Bits(value));
-          stack_map_stream->AddDexRegisterEntry(
-              DexRegisterLocation::Kind::kConstant, High32Bits(value));
+          stack_map_stream->AddDexRegisterEntry(Kind::kConstant, Low32Bits(value));
+          stack_map_stream->AddDexRegisterEntry(Kind::kConstant, High32Bits(value));
           ++i;
           DCHECK_LT(i, environment_size);
         } else if (current->IsIntConstant()) {
           int32_t value = current->AsIntConstant()->GetValue();
-          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, value);
+          stack_map_stream->AddDexRegisterEntry(Kind::kConstant, value);
         } else if (current->IsNullConstant()) {
-          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, 0);
+          stack_map_stream->AddDexRegisterEntry(Kind::kConstant, 0);
         } else {
           DCHECK(current->IsFloatConstant()) << current->DebugName();
           int32_t value = bit_cast<int32_t, float>(current->AsFloatConstant()->GetValue());
-          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, value);
+          stack_map_stream->AddDexRegisterEntry(Kind::kConstant, value);
         }
         break;
       }
 
       case Location::kStackSlot: {
-        stack_map_stream->AddDexRegisterEntry(
-            DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
+        stack_map_stream->AddDexRegisterEntry(Kind::kInStack, location.GetStackIndex());
         break;
       }
 
       case Location::kDoubleStackSlot: {
+        stack_map_stream->AddDexRegisterEntry(Kind::kInStack, location.GetStackIndex());
         stack_map_stream->AddDexRegisterEntry(
-            DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
-        stack_map_stream->AddDexRegisterEntry(
-            DexRegisterLocation::Kind::kInStack, location.GetHighStackIndex(kVRegSize));
+            Kind::kInStack, location.GetHighStackIndex(kVRegSize));
         ++i;
         DCHECK_LT(i, environment_size);
         break;
@@ -1347,17 +1331,16 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo
         int id = location.reg();
         if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(id)) {
           uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(id);
-          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
+          stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
           if (current->GetType() == DataType::Type::kInt64) {
-            stack_map_stream->AddDexRegisterEntry(
-                DexRegisterLocation::Kind::kInStack, offset + kVRegSize);
+            stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset + kVRegSize);
             ++i;
             DCHECK_LT(i, environment_size);
           }
         } else {
-          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, id);
+          stack_map_stream->AddDexRegisterEntry(Kind::kInRegister, id);
           if (current->GetType() == DataType::Type::kInt64) {
-            stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegisterHigh, id);
+            stack_map_stream->AddDexRegisterEntry(Kind::kInRegisterHigh, id);
             ++i;
             DCHECK_LT(i, environment_size);
           }
@@ -1369,18 +1352,16 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo
         int id = location.reg();
         if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(id)) {
           uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(id);
-          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
+          stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
           if (current->GetType() == DataType::Type::kFloat64) {
-            stack_map_stream->AddDexRegisterEntry(
-                DexRegisterLocation::Kind::kInStack, offset + kVRegSize);
+            stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset + kVRegSize);
             ++i;
             DCHECK_LT(i, environment_size);
           }
         } else {
-          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, id);
+          stack_map_stream->AddDexRegisterEntry(Kind::kInFpuRegister, id);
           if (current->GetType() == DataType::Type::kFloat64) {
-            stack_map_stream->AddDexRegisterEntry(
-                DexRegisterLocation::Kind::kInFpuRegisterHigh, id);
+            stack_map_stream->AddDexRegisterEntry(Kind::kInFpuRegisterHigh, id);
             ++i;
             DCHECK_LT(i, environment_size);
           }
@@ -1393,16 +1374,16 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo
         int high = location.high();
         if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(low)) {
           uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(low);
-          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
+          stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
         } else {
-          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, low);
+          stack_map_stream->AddDexRegisterEntry(Kind::kInFpuRegister, low);
         }
         if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(high)) {
           uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(high);
-          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
+          stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
           ++i;
         } else {
-          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, high);
+          stack_map_stream->AddDexRegisterEntry(Kind::kInFpuRegister, high);
           ++i;
         }
         DCHECK_LT(i, environment_size);
@@ -1414,15 +1395,15 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo
         int high = location.high();
         if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(low)) {
           uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(low);
-          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
+          stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
         } else {
-          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, low);
+          stack_map_stream->AddDexRegisterEntry(Kind::kInRegister, low);
         }
         if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(high)) {
           uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(high);
-          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
+          stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
         } else {
-          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, high);
+          stack_map_stream->AddDexRegisterEntry(Kind::kInRegister, high);
         }
         ++i;
         DCHECK_LT(i, environment_size);
@@ -1430,7 +1411,7 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo
       }
 
       case Location::kInvalid: {
-        stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
+        stack_map_stream->AddDexRegisterEntry(Kind::kNone, 0);
         break;
       }
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index b3c29aa804..03ae4983d4 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -323,7 +323,10 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
   }
 
   // Record native to dex mapping for a suspend point. Required by runtime.
-  void RecordPcInfo(HInstruction* instruction, uint32_t dex_pc, SlowPathCode* slow_path = nullptr);
+  void RecordPcInfo(HInstruction* instruction,
+                    uint32_t dex_pc,
+                    SlowPathCode* slow_path = nullptr,
+                    bool native_debug_info = false);
 
   // Check whether we have already recorded mapping at this PC.
   bool HasStackMapAtCurrentPc();
 
   // Record extra stack maps if we support native debugging.
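Taken together, the RecordPcInfo hunks replace the old duplicate-map convention: a suspend check that can serve as an OSR entry now emits a single stack map tagged with a kind, instead of two identical consecutive maps (which is also why the debug-build uniqueness DCHECK could be dropped). A minimal sketch of the resulting call, using the new signature from the diff; the numeric arguments are made up for illustration:

  // Suspend check in an irreducible loop header of an OSR compilation:
  stack_map_stream->BeginStackMapEntry(/* dex_pc */ 0x12,
                                       /* native_pc_offset */ 0x40,
                                       /* register_mask */ 0x3,
                                       locations->GetStackMask(),
                                       /* num_dex_registers */ 2,
                                       /* inlining_depth */ 0,
                                       StackMap::Kind::OSR);
  // One tagged entry; previously the same map was emitted twice so the
  // runtime could recognize the duplicate as an OSR marker.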
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 5f0533cbe9..8aa790db34 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -2459,6 +2459,9 @@ void InstructionCodeGeneratorARM64::HandleBinaryOp(HBinaryOperation* instr) {
         // all & reg_bits - 1.
         __ Ror(dst, lhs, RegisterFrom(instr->GetLocations()->InAt(1), type));
       }
+    } else if (instr->IsMin() || instr->IsMax()) {
+      __ Cmp(lhs, rhs);
+      __ Csel(dst, lhs, rhs, instr->IsMin() ? lt : gt);
     } else {
       DCHECK(instr->IsXor());
       __ Eor(dst, lhs, rhs);
@@ -2474,6 +2477,10 @@ void InstructionCodeGeneratorARM64::HandleBinaryOp(HBinaryOperation* instr) {
         __ Fadd(dst, lhs, rhs);
       } else if (instr->IsSub()) {
         __ Fsub(dst, lhs, rhs);
+      } else if (instr->IsMin()) {
+        __ Fmin(dst, lhs, rhs);
+      } else if (instr->IsMax()) {
+        __ Fmax(dst, lhs, rhs);
       } else {
         LOG(FATAL) << "Unexpected floating-point binary operation";
       }
@@ -5671,111 +5678,20 @@ void InstructionCodeGeneratorARM64::VisitRem(HRem* rem) {
   }
 }
 
-// TODO: integrate with HandleBinaryOp?
-static void CreateMinMaxLocations(ArenaAllocator* allocator, HBinaryOperation* minmax) {
-  LocationSummary* locations = new (allocator) LocationSummary(minmax);
-  switch (minmax->GetResultType()) {
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64:
-      locations->SetInAt(0, Location::RequiresRegister());
-      locations->SetInAt(1, Location::RequiresRegister());
-      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-      break;
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      locations->SetInAt(0, Location::RequiresFpuRegister());
-      locations->SetInAt(1, Location::RequiresFpuRegister());
-      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
-      break;
-    default:
-      LOG(FATAL) << "Unexpected type for HMinMax " << minmax->GetResultType();
-  }
-}
-
-void InstructionCodeGeneratorARM64::GenerateMinMaxInt(LocationSummary* locations,
-                                                      bool is_min,
-                                                      DataType::Type type) {
-  Location op1 = locations->InAt(0);
-  Location op2 = locations->InAt(1);
-  Location out = locations->Out();
-
-  Register op1_reg;
-  Register op2_reg;
-  Register out_reg;
-  if (type == DataType::Type::kInt64) {
-    op1_reg = XRegisterFrom(op1);
-    op2_reg = XRegisterFrom(op2);
-    out_reg = XRegisterFrom(out);
-  } else {
-    DCHECK_EQ(type, DataType::Type::kInt32);
-    op1_reg = WRegisterFrom(op1);
-    op2_reg = WRegisterFrom(op2);
-    out_reg = WRegisterFrom(out);
-  }
-
-  __ Cmp(op1_reg, op2_reg);
-  __ Csel(out_reg, op1_reg, op2_reg, is_min ? lt : gt);
-}
-
-void InstructionCodeGeneratorARM64::GenerateMinMaxFP(LocationSummary* locations,
-                                                     bool is_min,
-                                                     DataType::Type type) {
-  Location op1 = locations->InAt(0);
-  Location op2 = locations->InAt(1);
-  Location out = locations->Out();
-
-  FPRegister op1_reg;
-  FPRegister op2_reg;
-  FPRegister out_reg;
-  if (type == DataType::Type::kFloat64) {
-    op1_reg = DRegisterFrom(op1);
-    op2_reg = DRegisterFrom(op2);
-    out_reg = DRegisterFrom(out);
-  } else {
-    DCHECK_EQ(type, DataType::Type::kFloat32);
-    op1_reg = SRegisterFrom(op1);
-    op2_reg = SRegisterFrom(op2);
-    out_reg = SRegisterFrom(out);
-  }
-
-  if (is_min) {
-    __ Fmin(out_reg, op1_reg, op2_reg);
-  } else {
-    __ Fmax(out_reg, op1_reg, op2_reg);
-  }
-}
-
-// TODO: integrate with HandleBinaryOp?
-void InstructionCodeGeneratorARM64::GenerateMinMax(HBinaryOperation* minmax, bool is_min) {
-  DataType::Type type = minmax->GetResultType();
-  switch (type) {
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64:
-      GenerateMinMaxInt(minmax->GetLocations(), is_min, type);
-      break;
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      GenerateMinMaxFP(minmax->GetLocations(), is_min, type);
-      break;
-    default:
-      LOG(FATAL) << "Unexpected type for HMinMax " << type;
-  }
-}
-
 void LocationsBuilderARM64::VisitMin(HMin* min) {
-  CreateMinMaxLocations(GetGraph()->GetAllocator(), min);
+  HandleBinaryOp(min);
 }
 
 void InstructionCodeGeneratorARM64::VisitMin(HMin* min) {
-  GenerateMinMax(min, /*is_min*/ true);
+  HandleBinaryOp(min);
 }
 
 void LocationsBuilderARM64::VisitMax(HMax* max) {
-  CreateMinMaxLocations(GetGraph()->GetAllocator(), max);
+  HandleBinaryOp(max);
 }
 
 void InstructionCodeGeneratorARM64::VisitMax(HMax* max) {
-  GenerateMinMax(max, /*is_min*/ false);
+  HandleBinaryOp(max);
 }
 
 void LocationsBuilderARM64::VisitAbs(HAbs* abs) {
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index e7fe5b71b7..5afb712d17 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -280,10 +280,6 @@ class InstructionCodeGeneratorARM64 : public InstructionCodeGenerator {
   void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
   void HandleCondition(HCondition* instruction);
 
-  void GenerateMinMaxInt(LocationSummary* locations, bool is_min, DataType::Type type);
-  void GenerateMinMaxFP(LocationSummary* locations, bool is_min, DataType::Type type);
-  void GenerateMinMax(HBinaryOperation* minmax, bool is_min);
-
   // Generate a heap reference load using one register `out`:
   //
   //   out <- *(out + offset)
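The Cmp/Csel and Fmin/Fmax selection above works because the instruction semantics line up with HMin/HMax. A reference sketch in plain C++ (not ART code) of what the chosen instructions compute: AArch64 Fmin/Fmax propagate NaN and order -0.0 below +0.0, which is exactly what Java's Math.min/max require, whereas std::fmin would ignore a single NaN operand:

  #include <cmath>
  #include <cstdint>
  #include <limits>

  // Integer min/max are plain signed compares, i.e. Cmp followed by Csel.
  int64_t RefMin(int64_t a, int64_t b) { return a < b ? a : b; }  // Csel ..., lt
  int64_t RefMax(int64_t a, int64_t b) { return a > b ? a : b; }  // Csel ..., gt

  // FP min, spelled out: NaN wins, and -0.0 counts as smaller than +0.0.
  double RefFpMin(double a, double b) {
    if (std::isnan(a) || std::isnan(b)) return std::numeric_limits<double>::quiet_NaN();
    if (a == 0.0 && b == 0.0) return std::signbit(a) ? a : b;  // prefer -0.0
    return a < b ? a : b;
  }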
diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h
index ed2f8e995d..5191ee2b1e 100644
--- a/compiler/optimizing/common_arm64.h
+++ b/compiler/optimizing/common_arm64.h
@@ -234,6 +234,13 @@ inline vixl::aarch64::Operand OperandFromMemOperand(
   }
 }
 
+inline bool AddSubCanEncodeAsImmediate(int64_t value) {
+  // If `value` does not fit but `-value` does, VIXL will automatically use
+  // the 'opposite' instruction.
+  return vixl::aarch64::Assembler::IsImmAddSub(value)
+      || vixl::aarch64::Assembler::IsImmAddSub(-value);
+}
+
 inline bool Arm64CanEncodeConstantAsImmediate(HConstant* constant, HInstruction* instr) {
   int64_t value = CodeGenerator::GetInt64ValueOf(constant);
 
@@ -249,6 +256,20 @@ inline bool Arm64CanEncodeConstantAsImmediate(HConstant* constant, HInstruction*
     return IsUint<8>(value);
   }
 
+  // Code generation for Min/Max:
+  //    Cmp left_op, right_op
+  //    Csel dst, left_op, right_op, cond
+  if (instr->IsMin() || instr->IsMax()) {
+    if (constant->GetUses().HasExactlyOneElement()) {
+      // If value can be encoded as immediate for the Cmp, then let VIXL handle
+      // the constant generation for the Csel.
+      return AddSubCanEncodeAsImmediate(value);
+    }
+    // These values are encodable as immediates for Cmp and VIXL will use csinc and csinv
+    // with the zr register as right_op, hence no constant generation is required.
+    return constant->IsZeroBitPattern() || constant->IsOne() || constant->IsMinusOne();
+  }
+
   // For single uses we let VIXL handle the constant generation since it will
   // use registers that are not managed by the register allocator (wip0, wip1).
   if (constant->GetUses().HasExactlyOneElement()) {
@@ -275,10 +296,7 @@ inline bool Arm64CanEncodeConstantAsImmediate(HConstant* constant, HInstruction*
            instr->IsSub())
         << instr->DebugName();
     // Uses aliases of ADD/SUB instructions.
-    // If `value` does not fit but `-value` does, VIXL will automatically use
-    // the 'opposite' instruction.
-    return vixl::aarch64::Assembler::IsImmAddSub(value)
-        || vixl::aarch64::Assembler::IsImmAddSub(-value);
+    return AddSubCanEncodeAsImmediate(value);
  }
 }
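VIXL's IsImmAddSub accepts the AArch64 ADD/SUB immediate form: a 12-bit unsigned value, optionally shifted left by 12 bits. A self-contained restatement of that encoding rule (my own sketch, not the VIXL implementation):

  #include <cstdint>

  // ADD/SUB (immediate) encodes imm12 with an optional LSL #12, so the
  // encodable values are 0..4095, plus 4 KiB multiples up to 0xFFF000.
  bool IsAddSubImmediate(int64_t value) {
    uint64_t v = static_cast<uint64_t>(value);
    return v < 0x1000u ||                            // imm12
           ((v & 0xFFFu) == 0 && v < 0x1000000u);    // imm12, LSL #12
  }

AddSubCanEncodeAsImmediate additionally tries -value because VIXL falls back to the opposite instruction, for example materializing a compare against -1 as CMN with +1.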
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index 4863718518..e6b6326726 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -479,7 +479,10 @@ void GlobalValueNumberer::VisitBasicBlock(HBasicBlock* block) {
     HInstruction* next = current->GetNext();
     // Do not kill the set with the side effects of the instruction just now: if
     // the instruction is GVN'ed, we don't need to kill.
-    if (current->CanBeMoved()) {
+    //
+    // BoundType is a special case example of an instruction which shouldn't be moved but can be
+    // GVN'ed.
+    if (current->CanBeMoved() || current->IsBoundType()) {
       if (current->IsBinaryOperation() && current->AsBinaryOperation()->IsCommutative()) {
         // For commutative ops, (x op y) will be treated the same as (y op x)
         // after fixed ordering.
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index d532eeeb52..c979a5a56c 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -117,6 +117,7 @@ class InstructionSimplifierVisitor : public HGraphDelegateVisitor {
   void SimplifyFP2Int(HInvoke* invoke);
   void SimplifyStringCharAt(HInvoke* invoke);
   void SimplifyStringIsEmptyOrLength(HInvoke* invoke);
+  void SimplifyStringIndexOf(HInvoke* invoke);
   void SimplifyNPEOnArgN(HInvoke* invoke, size_t);
   void SimplifyReturnThis(HInvoke* invoke);
   void SimplifyAllocationIntrinsic(HInvoke* invoke);
@@ -2417,6 +2418,43 @@ void InstructionSimplifierVisitor::SimplifyStringIsEmptyOrLength(HInvoke* invoke
   invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, replacement);
 }
 
+void InstructionSimplifierVisitor::SimplifyStringIndexOf(HInvoke* invoke) {
+  DCHECK(invoke->GetIntrinsic() == Intrinsics::kStringIndexOf ||
+         invoke->GetIntrinsic() == Intrinsics::kStringIndexOfAfter);
+  if (invoke->InputAt(0)->IsLoadString()) {
+    HLoadString* load_string = invoke->InputAt(0)->AsLoadString();
+    const DexFile& dex_file = load_string->GetDexFile();
+    uint32_t utf16_length;
+    const char* data =
+        dex_file.StringDataAndUtf16LengthByIdx(load_string->GetStringIndex(), &utf16_length);
+    if (utf16_length == 0) {
+      invoke->ReplaceWith(GetGraph()->GetIntConstant(-1));
+      invoke->GetBlock()->RemoveInstruction(invoke);
+      RecordSimplification();
+      return;
+    }
+    if (utf16_length == 1 && invoke->GetIntrinsic() == Intrinsics::kStringIndexOf) {
+      // Simplify to HSelect(HEquals(., load_string.charAt(0)), 0, -1).
+      // If the sought character is supplementary, this gives the correct result, i.e. -1.
+      uint32_t c = GetUtf16FromUtf8(&data);
+      DCHECK_EQ(GetTrailingUtf16Char(c), 0u);
+      DCHECK_EQ(GetLeadingUtf16Char(c), c);
+      uint32_t dex_pc = invoke->GetDexPc();
+      ArenaAllocator* allocator = GetGraph()->GetAllocator();
+      HEqual* equal =
+          new (allocator) HEqual(invoke->InputAt(1), GetGraph()->GetIntConstant(c), dex_pc);
+      invoke->GetBlock()->InsertInstructionBefore(equal, invoke);
+      HSelect* result = new (allocator) HSelect(equal,
+                                                GetGraph()->GetIntConstant(0),
+                                                GetGraph()->GetIntConstant(-1),
+                                                dex_pc);
+      invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, result);
+      RecordSimplification();
+      return;
+    }
+  }
+}
+
 // This method should only be used on intrinsics whose sole way of throwing an
 // exception is raising a NPE when the nth argument is null. If that argument
 // is provably non-null, we can clear the flag.
@@ -2554,6 +2592,10 @@ void InstructionSimplifierVisitor::VisitInvoke(HInvoke* instruction) {
     case Intrinsics::kStringLength:
       SimplifyStringIsEmptyOrLength(instruction);
       break;
+    case Intrinsics::kStringIndexOf:
+    case Intrinsics::kStringIndexOfAfter:
+      SimplifyStringIndexOf(instruction);
+      break;
     case Intrinsics::kStringStringIndexOf:
     case Intrinsics::kStringStringIndexOfAfter:
      SimplifyNPEOnArgN(instruction, 1);  // 0th has own NullCheck
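For a constant receiver string, the indexOf intrinsic call thus folds away entirely: the empty-string case becomes the constant -1 (for both kStringIndexOf and kStringIndexOfAfter), and the length-1 case becomes an HEqual feeding an HSelect. A sketch of the reference semantics in plain C++ (hypothetical helper, not HIR):

  #include <cstdint>

  // "X".indexOf(ch)  ==>  HSelect(HEqual(ch, 'X'), 0, -1)
  int32_t RefIndexOfLengthOneLiteral(uint16_t literal_char, int32_t ch) {
    // A supplementary code point (ch > 0xFFFF) can never equal a single
    // UTF-16 unit, so the comparison correctly yields -1 for it.
    return (ch == literal_char) ? 0 : -1;
  }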
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index ef8a757ad0..661f66a34c 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -2786,6 +2786,14 @@ void HInstruction::SetReferenceTypeInfo(ReferenceTypeInfo rti) {
   SetPackedFlag<kFlagReferenceTypeIsExact>(rti.IsExact());
 }
 
+bool HBoundType::InstructionDataEquals(const HInstruction* other) const {
+  const HBoundType* other_bt = other->AsBoundType();
+  ScopedObjectAccess soa(Thread::Current());
+  return GetUpperBound().IsEqual(other_bt->GetUpperBound()) &&
+         GetUpperCanBeNull() == other_bt->GetUpperCanBeNull() &&
+         CanBeNull() == other_bt->CanBeNull();
+}
+
 void HBoundType::SetUpperBound(const ReferenceTypeInfo& upper_bound, bool can_be_null) {
   if (kIsDebugBuild) {
     ScopedObjectAccess soa(Thread::Current());
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 2037879726..975ad1c324 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -7142,6 +7142,7 @@ class HBoundType FINAL : public HExpression<1> {
     SetRawInputAt(0, input);
   }
 
+  bool InstructionDataEquals(const HInstruction* other) const OVERRIDE;
   bool IsClonable() const OVERRIDE { return true; }
 
   // {Get,Set}Upper* should only be used in reference type propagation.
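The gvn.cc and nodes.cc/nodes.h hunks are two halves of one change: HBoundType still reports CanBeMoved() as false, since hoisting it above the instanceof/checkcast guard that created it would assert a type not yet established on that path, but structurally identical BoundTypes can now be deduplicated in place. A toy analogue of the new equality check (a sketch, not ART code):

  #include <cstdint>

  // Same input value plus identical bound metadata means the nodes are
  // interchangeable where they stand.
  struct BoundType {
    uint32_t upper_bound;    // stand-in for ReferenceTypeInfo
    bool upper_can_be_null;
    bool can_be_null;
  };

  bool InstructionDataEquals(const BoundType& a, const BoundType& b) {
    return a.upper_bound == b.upper_bound &&
           a.upper_can_be_null == b.upper_can_be_null &&
           a.can_be_null == b.can_be_null;
  }
  // GVN separately requires matching inputs; data equality covers the rest.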
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index f6bd05269e..2f782f39fc 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -195,14 +195,19 @@ void SsaLivenessAnalysis::ComputeLiveRanges() {
       // SsaLivenessAnalysis.
       for (size_t i = 0, e = environment->Size(); i < e; ++i) {
         HInstruction* instruction = environment->GetInstructionAt(i);
+        if (instruction == nullptr) {
+          continue;
+        }
         bool should_be_live = ShouldBeLiveForEnvironment(current, instruction);
+        // If this environment use does not keep the instruction live, it does not
+        // affect the live range of that instruction.
         if (should_be_live) {
           CHECK(instruction->HasSsaIndex()) << instruction->DebugName();
           live_in->SetBit(instruction->GetSsaIndex());
-        }
-        if (instruction != nullptr) {
-          instruction->GetLiveInterval()->AddUse(
-              current, environment, i, /* actual_user */ nullptr, should_be_live);
+          instruction->GetLiveInterval()->AddUse(current,
+                                                 environment,
+                                                 i,
+                                                 /* actual_user */ nullptr);
         }
       }
     }
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index f83bb52b69..83ca5bd5fa 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -300,8 +300,7 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> {
   void AddUse(HInstruction* instruction,
               HEnvironment* environment,
               size_t input_index,
-              HInstruction* actual_user = nullptr,
-              bool keep_alive = false) {
+              HInstruction* actual_user = nullptr) {
     bool is_environment = (environment != nullptr);
     LocationSummary* locations = instruction->GetLocations();
     if (actual_user == nullptr) {
@@ -359,12 +358,6 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> {
       uses_.push_front(*new_use);
     }
 
-    if (is_environment && !keep_alive) {
-      // If this environment use does not keep the instruction live, it does not
-      // affect the live range of that instruction.
-      return;
-    }
-
     size_t start_block_position = instruction->GetBlock()->GetLifetimeStart();
     if (first_range_ == nullptr) {
       // First time we see a use of that interval.
@@ -1157,8 +1150,11 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> {
  *     of an instruction that has a primitive type make the instruction live.
  *     If the graph does not have the debuggable property, the environment
  *     use has no effect, and may get a 'none' value after register allocation.
+ * (d) When compiling in OSR mode, all loops in the compiled method may be entered
+ *     from the interpreter via SuspendCheck; such use in SuspendCheck makes the instruction
+ *     live.
  *
- * (b) and (c) are implemented through SsaLivenessAnalysis::ShouldBeLiveForEnvironment.
+ * (b), (c) and (d) are implemented through SsaLivenessAnalysis::ShouldBeLiveForEnvironment.
 */
 class SsaLivenessAnalysis : public ValueObject {
  public:
@@ -1259,14 +1255,18 @@ class SsaLivenessAnalysis : public ValueObject {
   // Returns whether `instruction` in an HEnvironment held by `env_holder`
   // should be kept live by the HEnvironment.
   static bool ShouldBeLiveForEnvironment(HInstruction* env_holder, HInstruction* instruction) {
-    if (instruction == nullptr) return false;
+    DCHECK(instruction != nullptr);
     // A value that's not live in compiled code may still be needed in interpreter,
     // due to code motion, etc.
     if (env_holder->IsDeoptimize()) return true;
     // A value live at a throwing instruction in a try block may be copied by
     // the exception handler to its location at the top of the catch block.
     if (env_holder->CanThrowIntoCatchBlock()) return true;
-    if (instruction->GetBlock()->GetGraph()->IsDebuggable()) return true;
+    HGraph* graph = instruction->GetBlock()->GetGraph();
+    if (graph->IsDebuggable()) return true;
+    // When compiling in OSR mode, all loops in the compiled method may be entered
+    // from the interpreter via SuspendCheck; thus we need to preserve the environment.
+    if (env_holder->IsSuspendCheck() && graph->IsCompilingOsr()) return true;
     return instruction->GetType() == DataType::Type::kReference;
   }
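In the interval dumps below, the second brace group ("uses: { ... }, { ... }") lists the environment uses recorded on the interval. Since AddUse is now called only for environment uses that pass ShouldBeLiveForEnvironment, uses that merely reference a dead value no longer appear, which is why several expectations change to "{ }". A plain C++ paraphrase of the predicate's decision order after this change (a sketch, not the ART source):

  enum class Type { kReference, kInt32 /* ... */ };

  bool ShouldBeLive(bool holder_is_deoptimize,
                    bool holder_can_throw_into_catch,
                    bool graph_is_debuggable,
                    bool holder_is_suspend_check,
                    bool compiling_osr,
                    Type type) {
    if (holder_is_deoptimize) return true;          // interpreter may need any value
    if (holder_can_throw_into_catch) return true;   // handler copies values over
    if (graph_is_debuggable) return true;
    if (holder_is_suspend_check && compiling_osr) return true;  // new in this change
    return type == Type::kReference;
  }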
"ranges: { [10,21) }, uses: { }, { 15 21 } is_fixed: 0, is_split: 0 is_low: 0 is_high: 0", }; diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc index 094b75de69..58a35dde8e 100644 --- a/compiler/optimizing/stack_map_stream.cc +++ b/compiler/optimizing/stack_map_stream.cc @@ -31,11 +31,12 @@ namespace art { constexpr static bool kVerifyStackMaps = kIsDebugBuild; uint32_t StackMapStream::GetStackMapNativePcOffset(size_t i) { - return StackMap::UnpackNativePc(stack_maps_[i].packed_native_pc, instruction_set_); + return StackMap::UnpackNativePc(stack_maps_[i][StackMap::kPackedNativePc], instruction_set_); } void StackMapStream::SetStackMapNativePcOffset(size_t i, uint32_t native_pc_offset) { - stack_maps_[i].packed_native_pc = StackMap::PackNativePc(native_pc_offset, instruction_set_); + stack_maps_[i][StackMap::kPackedNativePc] = + StackMap::PackNativePc(native_pc_offset, instruction_set_); } void StackMapStream::BeginStackMapEntry(uint32_t dex_pc, @@ -43,7 +44,8 @@ void StackMapStream::BeginStackMapEntry(uint32_t dex_pc, uint32_t register_mask, BitVector* stack_mask, uint32_t num_dex_registers, - uint8_t inlining_depth) { + uint8_t inlining_depth, + StackMap::Kind kind) { DCHECK(!in_stack_map_) << "Mismatched Begin/End calls"; in_stack_map_ = true; // num_dex_registers_ is the constant per-method number of registers. @@ -54,19 +56,17 @@ void StackMapStream::BeginStackMapEntry(uint32_t dex_pc, DCHECK_EQ(num_dex_registers_, num_dex_registers) << "Inconsistent register count"; } - current_stack_map_ = StackMapEntry { - .packed_native_pc = StackMap::PackNativePc(native_pc_offset, instruction_set_), - .dex_pc = dex_pc, - .register_mask_index = kNoValue, - .stack_mask_index = kNoValue, - .inline_info_index = kNoValue, - .dex_register_mask_index = kNoValue, - .dex_register_map_index = kNoValue, - }; + current_stack_map_ = BitTableBuilder<StackMap::kCount>::Entry(); + current_stack_map_[StackMap::kKind] = static_cast<uint32_t>(kind); + current_stack_map_[StackMap::kPackedNativePc] = + StackMap::PackNativePc(native_pc_offset, instruction_set_); + current_stack_map_[StackMap::kDexPc] = dex_pc; if (register_mask != 0) { uint32_t shift = LeastSignificantBit(register_mask); - RegisterMaskEntry entry = { register_mask >> shift, shift }; - current_stack_map_.register_mask_index = register_masks_.Dedup(&entry); + BitTableBuilder<RegisterMask::kCount>::Entry entry; + entry[RegisterMask::kValue] = register_mask >> shift; + entry[RegisterMask::kShift] = shift; + current_stack_map_[StackMap::kRegisterMaskIndex] = register_masks_.Dedup(&entry); } // The compiler assumes the bit vector will be read during PrepareForFillIn(), // and it might modify the data before that. Therefore, just store the pointer. @@ -81,8 +81,17 @@ void StackMapStream::BeginStackMapEntry(uint32_t dex_pc, // Create lambda method, which will be executed at the very end to verify data. // Parameters and local variables will be captured(stored) by the lambda "[=]". 
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index 094b75de69..58a35dde8e 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -31,11 +31,12 @@ namespace art {
 constexpr static bool kVerifyStackMaps = kIsDebugBuild;
 
 uint32_t StackMapStream::GetStackMapNativePcOffset(size_t i) {
-  return StackMap::UnpackNativePc(stack_maps_[i].packed_native_pc, instruction_set_);
+  return StackMap::UnpackNativePc(stack_maps_[i][StackMap::kPackedNativePc], instruction_set_);
 }
 
 void StackMapStream::SetStackMapNativePcOffset(size_t i, uint32_t native_pc_offset) {
-  stack_maps_[i].packed_native_pc = StackMap::PackNativePc(native_pc_offset, instruction_set_);
+  stack_maps_[i][StackMap::kPackedNativePc] =
+      StackMap::PackNativePc(native_pc_offset, instruction_set_);
 }
 
 void StackMapStream::BeginStackMapEntry(uint32_t dex_pc,
@@ -43,7 +44,8 @@ void StackMapStream::BeginStackMapEntry(uint32_t dex_pc,
                                         uint32_t register_mask,
                                         BitVector* stack_mask,
                                         uint32_t num_dex_registers,
-                                        uint8_t inlining_depth) {
+                                        uint8_t inlining_depth,
+                                        StackMap::Kind kind) {
   DCHECK(!in_stack_map_) << "Mismatched Begin/End calls";
   in_stack_map_ = true;
   // num_dex_registers_ is the constant per-method number of registers.
@@ -54,19 +56,17 @@ void StackMapStream::BeginStackMapEntry(uint32_t dex_pc,
     DCHECK_EQ(num_dex_registers_, num_dex_registers) << "Inconsistent register count";
   }
 
-  current_stack_map_ = StackMapEntry {
-    .packed_native_pc = StackMap::PackNativePc(native_pc_offset, instruction_set_),
-    .dex_pc = dex_pc,
-    .register_mask_index = kNoValue,
-    .stack_mask_index = kNoValue,
-    .inline_info_index = kNoValue,
-    .dex_register_mask_index = kNoValue,
-    .dex_register_map_index = kNoValue,
-  };
+  current_stack_map_ = BitTableBuilder<StackMap::kCount>::Entry();
+  current_stack_map_[StackMap::kKind] = static_cast<uint32_t>(kind);
+  current_stack_map_[StackMap::kPackedNativePc] =
+      StackMap::PackNativePc(native_pc_offset, instruction_set_);
+  current_stack_map_[StackMap::kDexPc] = dex_pc;
   if (register_mask != 0) {
     uint32_t shift = LeastSignificantBit(register_mask);
-    RegisterMaskEntry entry = { register_mask >> shift, shift };
-    current_stack_map_.register_mask_index = register_masks_.Dedup(&entry);
+    BitTableBuilder<RegisterMask::kCount>::Entry entry;
+    entry[RegisterMask::kValue] = register_mask >> shift;
+    entry[RegisterMask::kShift] = shift;
+    current_stack_map_[StackMap::kRegisterMaskIndex] = register_masks_.Dedup(&entry);
   }
   // The compiler assumes the bit vector will be read during PrepareForFillIn(),
   // and it might modify the data before that. Therefore, just store the pointer.
@@ -81,8 +81,17 @@ void StackMapStream::BeginStackMapEntry(uint32_t dex_pc,
     // Create lambda method, which will be executed at the very end to verify data.
     // Parameters and local variables will be captured(stored) by the lambda "[=]".
     dchecks_.emplace_back([=](const CodeInfo& code_info) {
+      if (kind == StackMap::Kind::Default || kind == StackMap::Kind::OSR) {
+        StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset,
+                                                                    instruction_set_);
+        CHECK_EQ(stack_map.Row(), stack_map_index);
+      } else if (kind == StackMap::Kind::Catch) {
+        StackMap stack_map = code_info.GetCatchStackMapForDexPc(dex_pc);
+        CHECK_EQ(stack_map.Row(), stack_map_index);
+      }
       StackMap stack_map = code_info.GetStackMapAt(stack_map_index);
       CHECK_EQ(stack_map.GetNativePcOffset(instruction_set_), native_pc_offset);
+      CHECK_EQ(stack_map.GetKind(), static_cast<uint32_t>(kind));
       CHECK_EQ(stack_map.GetDexPc(), dex_pc);
       CHECK_EQ(code_info.GetRegisterMaskOf(stack_map), register_mask);
       BitMemoryRegion seen_stack_mask = code_info.GetStackMaskOf(stack_map);
@@ -103,8 +112,8 @@ void StackMapStream::EndStackMapEntry() {
 
   // Generate index into the InlineInfo table.
   if (!current_inline_infos_.empty()) {
-    current_inline_infos_.back().is_last = InlineInfo::kLast;
-    current_stack_map_.inline_info_index =
+    current_inline_infos_.back()[InlineInfo::kIsLast] = InlineInfo::kLast;
+    current_stack_map_[StackMap::kInlineInfoIndex] =
         inline_infos_.Dedup(current_inline_infos_.data(), current_inline_infos_.size());
   }
 
@@ -114,18 +123,14 @@ void StackMapStream::EndStackMapEntry() {
   stack_maps_.Add(current_stack_map_);
 }
 
-void StackMapStream::AddDexRegisterEntry(DexRegisterLocation::Kind kind, int32_t value) {
-  current_dex_registers_.push_back(DexRegisterLocation(kind, value));
-}
-
 void StackMapStream::AddInvoke(InvokeType invoke_type, uint32_t dex_method_index) {
-  uint32_t packed_native_pc = current_stack_map_.packed_native_pc;
+  uint32_t packed_native_pc = current_stack_map_[StackMap::kPackedNativePc];
   size_t invoke_info_index = invoke_infos_.size();
-  invoke_infos_.Add(InvokeInfoEntry {
-    .packed_native_pc = packed_native_pc,
-    .invoke_type = invoke_type,
-    .method_info_index = method_infos_.Dedup(&dex_method_index),
-  });
+  BitTableBuilder<InvokeInfo::kCount>::Entry entry;
+  entry[InvokeInfo::kPackedNativePc] = packed_native_pc;
+  entry[InvokeInfo::kInvokeType] = invoke_type;
+  entry[InvokeInfo::kMethodInfoIndex] = method_infos_.Dedup({dex_method_index});
+  invoke_infos_.Add(entry);
 
   if (kVerifyStackMaps) {
     dchecks_.emplace_back([=](const CodeInfo& code_info) {
@@ -133,7 +138,7 @@ void StackMapStream::AddInvoke(InvokeType invoke_type, uint32_t dex_method_index
       CHECK_EQ(invoke_info.GetNativePcOffset(instruction_set_),
               StackMap::UnpackNativePc(packed_native_pc, instruction_set_));
       CHECK_EQ(invoke_info.GetInvokeType(), invoke_type);
-      CHECK_EQ(method_infos_[invoke_info.GetMethodInfoIndex()], dex_method_index);
+      CHECK_EQ(method_infos_[invoke_info.GetMethodInfoIndex()][0], dex_method_index);
     });
   }
 }
@@ -148,24 +153,20 @@ void StackMapStream::BeginInlineInfoEntry(ArtMethod* method,
 
   expected_num_dex_registers_ += num_dex_registers;
 
-  InlineInfoEntry entry = {
-    .is_last = InlineInfo::kMore,
-    .dex_pc = dex_pc,
-    .method_info_index = kNoValue,
-    .art_method_hi = kNoValue,
-    .art_method_lo = kNoValue,
-    .num_dex_registers = static_cast<uint32_t>(expected_num_dex_registers_),
-  };
+  BitTableBuilder<InlineInfo::kCount>::Entry entry;
+  entry[InlineInfo::kIsLast] = InlineInfo::kMore;
+  entry[InlineInfo::kDexPc] = dex_pc;
+  entry[InlineInfo::kNumberOfDexRegisters] = static_cast<uint32_t>(expected_num_dex_registers_);
   if (EncodeArtMethodInInlineInfo(method)) {
-    entry.art_method_hi = High32Bits(reinterpret_cast<uintptr_t>(method));
-    entry.art_method_lo = Low32Bits(reinterpret_cast<uintptr_t>(method));
+    entry[InlineInfo::kArtMethodHi] = High32Bits(reinterpret_cast<uintptr_t>(method));
+    entry[InlineInfo::kArtMethodLo] = Low32Bits(reinterpret_cast<uintptr_t>(method));
   } else {
     if (dex_pc != static_cast<uint32_t>(-1) && kIsDebugBuild) {
       ScopedObjectAccess soa(Thread::Current());
       DCHECK(IsSameDexFile(*outer_dex_file, *method->GetDexFile()));
     }
     uint32_t dex_method_index = method->GetDexMethodIndexUnchecked();
-    entry.method_info_index = method_infos_.Dedup(&dex_method_index);
+    entry[InlineInfo::kMethodInfoIndex] = method_infos_.Dedup({dex_method_index});
   }
   current_inline_infos_.push_back(entry);
 
@@ -181,7 +182,7 @@ void StackMapStream::BeginInlineInfoEntry(ArtMethod* method,
       if (encode_art_method) {
         CHECK_EQ(inline_info.GetArtMethod(), method);
       } else {
-        CHECK_EQ(method_infos_[inline_info.GetMethodInfoIndex()],
+        CHECK_EQ(method_infos_[inline_info.GetMethodInfoIndex()][0],
                  method->GetDexMethodIndexUnchecked());
       }
     });
@@ -214,13 +215,13 @@ void StackMapStream::CreateDexRegisterMap() {
       // Distance is difference between this index and the index of last modification.
       uint32_t distance = stack_maps_.size() - dex_register_timestamp_[i];
       if (previous_dex_registers_[i] != reg || distance > kMaxDexRegisterMapSearchDistance) {
-        DexRegisterEntry entry = DexRegisterEntry{
-          .kind = static_cast<uint32_t>(reg.GetKind()),
-          .packed_value = DexRegisterInfo::PackValue(reg.GetKind(), reg.GetValue()),
-        };
+        BitTableBuilder<DexRegisterInfo::kCount>::Entry entry;
+        entry[DexRegisterInfo::kKind] = static_cast<uint32_t>(reg.GetKind());
+        entry[DexRegisterInfo::kPackedValue] =
+            DexRegisterInfo::PackValue(reg.GetKind(), reg.GetValue());
         uint32_t index = reg.IsLive() ? dex_register_catalog_.Dedup(&entry) : kNoValue;
         temp_dex_register_mask_.SetBit(i);
-        temp_dex_register_map_.push_back(index);
+        temp_dex_register_map_.push_back({index});
         previous_dex_registers_[i] = reg;
         dex_register_timestamp_[i] = stack_maps_.size();
       }
@@ -228,12 +229,12 @@ void StackMapStream::CreateDexRegisterMap() {
 
   // Set the mask and map for the current StackMap (which includes inlined registers).
   if (temp_dex_register_mask_.GetNumberOfBits() != 0) {
-    current_stack_map_.dex_register_mask_index =
+    current_stack_map_[StackMap::kDexRegisterMaskIndex] =
         dex_register_masks_.Dedup(temp_dex_register_mask_.GetRawStorage(),
                                   temp_dex_register_mask_.GetNumberOfBits());
   }
   if (!current_dex_registers_.empty()) {
-    current_stack_map_.dex_register_map_index =
+    current_stack_map_[StackMap::kDexRegisterMapIndex] =
         dex_register_maps_.Dedup(temp_dex_register_map_.data(),
                                  temp_dex_register_map_.size());
   }
@@ -264,7 +265,7 @@ void StackMapStream::FillInMethodInfo(MemoryRegion region) {
   {
     MethodInfo info(region.begin(), method_infos_.size());
     for (size_t i = 0; i < method_infos_.size(); ++i) {
-      info.SetMethodIndex(i, method_infos_[i]);
+      info.SetMethodIndex(i, method_infos_[i][0]);
     }
   }
   if (kVerifyStackMaps) {
@@ -273,23 +274,19 @@ void StackMapStream::FillInMethodInfo(MemoryRegion region) {
     const size_t count = info.NumMethodIndices();
     DCHECK_EQ(count, method_infos_.size());
     for (size_t i = 0; i < count; ++i) {
-      DCHECK_EQ(info.GetMethodIndex(i), method_infos_[i]);
+      DCHECK_EQ(info.GetMethodIndex(i), method_infos_[i][0]);
     }
   }
 }
 
 size_t StackMapStream::PrepareForFillIn() {
-  static_assert(sizeof(StackMapEntry) == StackMap::kCount * sizeof(uint32_t), "Layout");
-  static_assert(sizeof(InvokeInfoEntry) == InvokeInfo::kCount * sizeof(uint32_t), "Layout");
-  static_assert(sizeof(InlineInfoEntry) == InlineInfo::kCount * sizeof(uint32_t), "Layout");
-  static_assert(sizeof(DexRegisterEntry) == DexRegisterInfo::kCount * sizeof(uint32_t), "Layout");
   DCHECK_EQ(out_.size(), 0u);
 
   // Read the stack masks now. The compiler might have updated them.
   for (size_t i = 0; i < lazy_stack_masks_.size(); i++) {
     BitVector* stack_mask = lazy_stack_masks_[i];
     if (stack_mask != nullptr && stack_mask->GetNumberOfBits() != 0) {
-      stack_maps_[i].stack_mask_index =
+      stack_maps_[i][StackMap::kStackMaskIndex] =
          stack_masks_.Dedup(stack_mask->GetRawStorage(), stack_mask->GetNumberOfBits());
     }
   }
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 02fb6cb434..6842d9fd7e 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -27,11 +27,10 @@
 #include "dex_register_location.h"
 #include "method_info.h"
 #include "nodes.h"
+#include "stack_map.h"
 
 namespace art {
 
-class CodeInfo;
-
 /**
  * Collects and builds stack maps for a method. All the stack maps
 * for a method are placed in a CodeInfo object.
@@ -53,6 +52,7 @@ class StackMapStream : public ValueObject {
         lazy_stack_masks_(allocator->Adapter(kArenaAllocStackMapStream)),
         in_stack_map_(false),
         in_inline_info_(false),
+        current_stack_map_(),
         current_inline_infos_(allocator->Adapter(kArenaAllocStackMapStream)),
         current_dex_registers_(allocator->Adapter(kArenaAllocStackMapStream)),
         previous_dex_registers_(allocator->Adapter(kArenaAllocStackMapStream)),
@@ -66,10 +66,13 @@ class StackMapStream : public ValueObject {
                          uint32_t register_mask,
                          BitVector* sp_mask,
                          uint32_t num_dex_registers,
-                         uint8_t inlining_depth);
+                         uint8_t inlining_depth,
+                         StackMap::Kind kind = StackMap::Kind::Default);
   void EndStackMapEntry();
 
-  void AddDexRegisterEntry(DexRegisterLocation::Kind kind, int32_t value);
+  void AddDexRegisterEntry(DexRegisterLocation::Kind kind, int32_t value) {
+    current_dex_registers_.push_back(DexRegisterLocation(kind, value));
+  }
 
   void AddInvoke(InvokeType type, uint32_t dex_method_index);
 
@@ -97,69 +100,29 @@ class StackMapStream : public ValueObject {
  private:
   static constexpr uint32_t kNoValue = -1;
 
-  // The fields must be uint32_t and mirror the StackMap accessor in stack_map.h!
-  struct StackMapEntry {
-    uint32_t packed_native_pc;
-    uint32_t dex_pc;
-    uint32_t register_mask_index;
-    uint32_t stack_mask_index;
-    uint32_t inline_info_index;
-    uint32_t dex_register_mask_index;
-    uint32_t dex_register_map_index;
-  };
-
-  // The fields must be uint32_t and mirror the InlineInfo accessor in stack_map.h!
-  struct InlineInfoEntry {
-    uint32_t is_last;
-    uint32_t dex_pc;
-    uint32_t method_info_index;
-    uint32_t art_method_hi;
-    uint32_t art_method_lo;
-    uint32_t num_dex_registers;
-  };
-
-  // The fields must be uint32_t and mirror the InvokeInfo accessor in stack_map.h!
-  struct InvokeInfoEntry {
-    uint32_t packed_native_pc;
-    uint32_t invoke_type;
-    uint32_t method_info_index;
-  };
-
-  // The fields must be uint32_t and mirror the DexRegisterInfo accessor in stack_map.h!
-  struct DexRegisterEntry {
-    uint32_t kind;
-    uint32_t packed_value;
-  };
-
-  // The fields must be uint32_t and mirror the RegisterMask accessor in stack_map.h!
-  struct RegisterMaskEntry {
-    uint32_t value;
-    uint32_t shift;
-  };
-
   void CreateDexRegisterMap();
 
   const InstructionSet instruction_set_;
-  BitTableBuilder<StackMapEntry> stack_maps_;
-  BitTableBuilder<RegisterMaskEntry> register_masks_;
+  BitTableBuilder<StackMap::kCount> stack_maps_;
+  BitTableBuilder<RegisterMask::kCount> register_masks_;
   BitmapTableBuilder stack_masks_;
-  BitTableBuilder<InvokeInfoEntry> invoke_infos_;
-  BitTableBuilder<InlineInfoEntry> inline_infos_;
+  BitTableBuilder<InvokeInfo::kCount> invoke_infos_;
+  BitTableBuilder<InlineInfo::kCount> inline_infos_;
   BitmapTableBuilder dex_register_masks_;
-  BitTableBuilder<uint32_t> dex_register_maps_;
-  BitTableBuilder<DexRegisterEntry> dex_register_catalog_;
+  BitTableBuilder<MaskInfo::kCount> dex_register_maps_;
+  BitTableBuilder<DexRegisterInfo::kCount> dex_register_catalog_;
   uint32_t num_dex_registers_ = 0;  // TODO: Make this const and get the value in constructor.
 
   ScopedArenaVector<uint8_t> out_;
 
-  BitTableBuilder<uint32_t> method_infos_;
+  BitTableBuilder<1> method_infos_;
 
   ScopedArenaVector<BitVector*> lazy_stack_masks_;
 
   // Variables which track the current state between Begin/End calls;
   bool in_stack_map_;
   bool in_inline_info_;
-  StackMapEntry current_stack_map_;
-  ScopedArenaVector<InlineInfoEntry> current_inline_infos_;
+  BitTableBuilder<StackMap::kCount>::Entry current_stack_map_;
+  ScopedArenaVector<BitTableBuilder<InlineInfo::kCount>::Entry> current_inline_infos_;
   ScopedArenaVector<DexRegisterLocation> current_dex_registers_;
   ScopedArenaVector<DexRegisterLocation> previous_dex_registers_;
   ScopedArenaVector<uint32_t> dex_register_timestamp_;  // Stack map index of last change.
@@ -168,7 +131,7 @@ class StackMapStream : public ValueObject {
   // Temporary variables used in CreateDexRegisterMap.
   // They are here so that we can reuse the reserved memory.
   ArenaBitVector temp_dex_register_mask_;
-  ScopedArenaVector<uint32_t> temp_dex_register_map_;
+  ScopedArenaVector<BitTableBuilder<DexRegisterMapInfo::kCount>::Entry> temp_dex_register_map_;
 
   // A set of lambda functions to be executed at the end to verify
   // the encoded data. It is generally only used in debug builds.
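The Entry type replaces the per-table structs throughout: a row is indexed by the accessor's column constants and deduplicated into the table. A minimal usage sketch reusing names from the hunks above; that a fresh Entry starts with every column set to kNoValue is my reading of the builder's behavior, suggested by the bare Entry() reset of current_stack_map_:

  // Building one RegisterMask row, as BeginStackMapEntry now does:
  BitTableBuilder<RegisterMask::kCount>::Entry entry;   // columns default to kNoValue
  entry[RegisterMask::kValue] = register_mask >> shift;
  entry[RegisterMask::kShift] = shift;
  uint32_t index = register_masks_.Dedup(&entry);       // identical rows share one index

  // Reads mirror the writes, e.g. stack_maps_[i][StackMap::kPackedNativePc],
  // which is what lets the struct definitions and their layout static_asserts go away.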
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index 9adc4c5ba6..fd856671ba 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -83,14 +83,14 @@ TEST(StackMapTest, Test1) {
   ASSERT_TRUE(stack_map.HasDexRegisterMap());
   DexRegisterMap dex_register_map = code_info.GetDexRegisterMapOf(stack_map);
   ASSERT_EQ(number_of_dex_registers, dex_register_map.size());
-  ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
-  ASSERT_TRUE(dex_register_map.IsDexRegisterLive(1));
+  ASSERT_TRUE(dex_register_map[0].IsLive());
+  ASSERT_TRUE(dex_register_map[1].IsLive());
   ASSERT_EQ(2u, dex_register_map.GetNumberOfLiveDexRegisters());
 
-  ASSERT_EQ(Kind::kInStack, dex_register_map.GetLocationKind(0));
-  ASSERT_EQ(Kind::kConstant, dex_register_map.GetLocationKind(1));
-  ASSERT_EQ(0, dex_register_map.GetStackOffsetInBytes(0));
-  ASSERT_EQ(-2, dex_register_map.GetConstant(1));
+  ASSERT_EQ(Kind::kInStack, dex_register_map[0].GetKind());
+  ASSERT_EQ(Kind::kConstant, dex_register_map[1].GetKind());
+  ASSERT_EQ(0, dex_register_map[0].GetStackOffsetInBytes());
+  ASSERT_EQ(-2, dex_register_map[1].GetConstant());
 
   DexRegisterLocation location0 = code_info.GetDexRegisterCatalogEntry(0);
   DexRegisterLocation location1 = code_info.GetDexRegisterCatalogEntry(1);
@@ -172,14 +172,14 @@ TEST(StackMapTest, Test2) {
   ASSERT_TRUE(stack_map.HasDexRegisterMap());
   DexRegisterMap dex_register_map = code_info.GetDexRegisterMapOf(stack_map);
   ASSERT_EQ(number_of_dex_registers, dex_register_map.size());
-  ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
-  ASSERT_TRUE(dex_register_map.IsDexRegisterLive(1));
+  ASSERT_TRUE(dex_register_map[0].IsLive());
+  ASSERT_TRUE(dex_register_map[1].IsLive());
   ASSERT_EQ(2u, dex_register_map.GetNumberOfLiveDexRegisters());
 
-  ASSERT_EQ(Kind::kInStack, dex_register_map.GetLocationKind(0));
-  ASSERT_EQ(Kind::kConstant, dex_register_map.GetLocationKind(1));
-  ASSERT_EQ(0, dex_register_map.GetStackOffsetInBytes(0));
-  ASSERT_EQ(-2, dex_register_map.GetConstant(1));
+  ASSERT_EQ(Kind::kInStack, dex_register_map[0].GetKind());
+  ASSERT_EQ(Kind::kConstant, dex_register_map[1].GetKind());
+  ASSERT_EQ(0, dex_register_map[0].GetStackOffsetInBytes());
+  ASSERT_EQ(-2, dex_register_map[1].GetConstant());
 
   DexRegisterLocation location0 = code_info.GetDexRegisterCatalogEntry(0);
   DexRegisterLocation location1 = code_info.GetDexRegisterCatalogEntry(1);
@@ -212,14 +212,14 @@ TEST(StackMapTest, Test2) {
   ASSERT_TRUE(stack_map.HasDexRegisterMap());
   DexRegisterMap dex_register_map = code_info.GetDexRegisterMapOf(stack_map);
   ASSERT_EQ(number_of_dex_registers, dex_register_map.size());
-  ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
-  ASSERT_TRUE(dex_register_map.IsDexRegisterLive(1));
+  ASSERT_TRUE(dex_register_map[0].IsLive());
+  ASSERT_TRUE(dex_register_map[1].IsLive());
   ASSERT_EQ(2u, dex_register_map.GetNumberOfLiveDexRegisters());
 
-  ASSERT_EQ(Kind::kInRegister, dex_register_map.GetLocationKind(0));
-  ASSERT_EQ(Kind::kInFpuRegister, dex_register_map.GetLocationKind(1));
-  ASSERT_EQ(18, dex_register_map.GetMachineRegister(0));
-  ASSERT_EQ(3, dex_register_map.GetMachineRegister(1));
+  ASSERT_EQ(Kind::kInRegister, dex_register_map[0].GetKind());
+  ASSERT_EQ(Kind::kInFpuRegister, dex_register_map[1].GetKind());
+  ASSERT_EQ(18, dex_register_map[0].GetMachineRegister());
+  ASSERT_EQ(3, dex_register_map[1].GetMachineRegister());
 
   DexRegisterLocation location0 = code_info.GetDexRegisterCatalogEntry(2);
   DexRegisterLocation location1 = code_info.GetDexRegisterCatalogEntry(3);
@@ -245,14 +245,14 @@ TEST(StackMapTest, Test2) {
   ASSERT_TRUE(stack_map.HasDexRegisterMap());
   DexRegisterMap dex_register_map = code_info.GetDexRegisterMapOf(stack_map);
   ASSERT_EQ(number_of_dex_registers, dex_register_map.size());
-  ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
-  ASSERT_TRUE(dex_register_map.IsDexRegisterLive(1));
+  ASSERT_TRUE(dex_register_map[0].IsLive());
+  ASSERT_TRUE(dex_register_map[1].IsLive());
   ASSERT_EQ(2u, dex_register_map.GetNumberOfLiveDexRegisters());
 
-  ASSERT_EQ(Kind::kInRegister, dex_register_map.GetLocationKind(0));
-  ASSERT_EQ(Kind::kInRegisterHigh, dex_register_map.GetLocationKind(1));
-  ASSERT_EQ(6, dex_register_map.GetMachineRegister(0));
-  ASSERT_EQ(8, dex_register_map.GetMachineRegister(1));
+  ASSERT_EQ(Kind::kInRegister, dex_register_map[0].GetKind());
+  ASSERT_EQ(Kind::kInRegisterHigh, dex_register_map[1].GetKind());
+  ASSERT_EQ(6, dex_register_map[0].GetMachineRegister());
+  ASSERT_EQ(8, dex_register_map[1].GetMachineRegister());
 
   DexRegisterLocation location0 = code_info.GetDexRegisterCatalogEntry(4);
   DexRegisterLocation location1 = code_info.GetDexRegisterCatalogEntry(5);
@@ -278,14 +278,14 @@ TEST(StackMapTest, Test2) {
   ASSERT_TRUE(stack_map.HasDexRegisterMap());
   DexRegisterMap dex_register_map = code_info.GetDexRegisterMapOf(stack_map);
   ASSERT_EQ(number_of_dex_registers, dex_register_map.size());
-  ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
-  ASSERT_TRUE(dex_register_map.IsDexRegisterLive(1));
+  ASSERT_TRUE(dex_register_map[0].IsLive());
+  ASSERT_TRUE(dex_register_map[1].IsLive());
   ASSERT_EQ(2u, dex_register_map.GetNumberOfLiveDexRegisters());
 
-  ASSERT_EQ(Kind::kInFpuRegister, dex_register_map.GetLocationKind(0));
-  ASSERT_EQ(Kind::kInFpuRegisterHigh, dex_register_map.GetLocationKind(1));
-  ASSERT_EQ(3, dex_register_map.GetMachineRegister(0));
-  ASSERT_EQ(1, dex_register_map.GetMachineRegister(1));
+  ASSERT_EQ(Kind::kInFpuRegister, dex_register_map[0].GetKind());
+  ASSERT_EQ(Kind::kInFpuRegisterHigh, dex_register_map[1].GetKind());
+  ASSERT_EQ(3, dex_register_map[0].GetMachineRegister());
+  ASSERT_EQ(1, dex_register_map[1].GetMachineRegister());
 
   DexRegisterLocation location0 = code_info.GetDexRegisterCatalogEntry(3);
   DexRegisterLocation location1 = code_info.GetDexRegisterCatalogEntry(6);
@@ -344,14 +344,14 @@ TEST(StackMapTest, TestDeduplicateInlineInfoDexRegisterMap) {
   ASSERT_TRUE(stack_map.HasDexRegisterMap());
   DexRegisterMap map(code_info.GetDexRegisterMapOf(stack_map));
   ASSERT_EQ(number_of_dex_registers, map.size());
-  ASSERT_TRUE(map.IsDexRegisterLive(0));
-  ASSERT_TRUE(map.IsDexRegisterLive(1));
+  ASSERT_TRUE(map[0].IsLive());
+  ASSERT_TRUE(map[1].IsLive());
   ASSERT_EQ(2u, map.GetNumberOfLiveDexRegisters());
 
-  ASSERT_EQ(Kind::kInStack, map.GetLocationKind(0));
-  ASSERT_EQ(Kind::kConstant, map.GetLocationKind(1));
-  ASSERT_EQ(0, map.GetStackOffsetInBytes(0));
-  ASSERT_EQ(-2, map.GetConstant(1));
+  ASSERT_EQ(Kind::kInStack, map[0].GetKind());
+  ASSERT_EQ(Kind::kConstant, map[1].GetKind());
+  ASSERT_EQ(0, map[0].GetStackOffsetInBytes());
+  ASSERT_EQ(-2, map[1].GetConstant());
 
   DexRegisterLocation location0 = code_info.GetDexRegisterCatalogEntry(0);
   DexRegisterLocation location1 = code_info.GetDexRegisterCatalogEntry(1);
@@ -396,13 +396,13 @@ TEST(StackMapTest, TestNonLiveDexRegisters) {
   ASSERT_TRUE(stack_map.HasDexRegisterMap());
   DexRegisterMap dex_register_map = code_info.GetDexRegisterMapOf(stack_map);
   ASSERT_EQ(number_of_dex_registers, dex_register_map.size());
-  ASSERT_FALSE(dex_register_map.IsDexRegisterLive(0));
-  ASSERT_TRUE(dex_register_map.IsDexRegisterLive(1));
+  ASSERT_FALSE(dex_register_map[0].IsLive());
+  ASSERT_TRUE(dex_register_map[1].IsLive());
   ASSERT_EQ(1u, dex_register_map.GetNumberOfLiveDexRegisters());
 
-  ASSERT_EQ(Kind::kNone, dex_register_map.GetLocationKind(0));
-  ASSERT_EQ(Kind::kConstant, dex_register_map.GetLocationKind(1));
-  ASSERT_EQ(-2, dex_register_map.GetConstant(1));
+  ASSERT_EQ(Kind::kNone, dex_register_map[0].GetKind());
+  ASSERT_EQ(Kind::kConstant, dex_register_map[1].GetKind());
+  ASSERT_EQ(-2, dex_register_map[1].GetConstant());
 
   DexRegisterLocation location1 = code_info.GetDexRegisterCatalogEntry(0);
   ASSERT_EQ(Kind::kConstant, location1.GetKind());
@@ -425,12 +425,12 @@ TEST(StackMapTest, TestShareDexRegisterMap) {
   stream.AddDexRegisterEntry(Kind::kConstant, -2);   // Large location.
   stream.EndStackMapEntry();
   // Second stack map, which should share the same dex register map.
-  stream.BeginStackMapEntry(0, 64 * kPcAlign, 0x3, &sp_mask, number_of_dex_registers, 0);
+  stream.BeginStackMapEntry(0, 65 * kPcAlign, 0x3, &sp_mask, number_of_dex_registers, 0);
   stream.AddDexRegisterEntry(Kind::kInRegister, 0);  // Short location.
   stream.AddDexRegisterEntry(Kind::kConstant, -2);   // Large location.
   stream.EndStackMapEntry();
   // Third stack map (doesn't share the dex register map).
-  stream.BeginStackMapEntry(0, 64 * kPcAlign, 0x3, &sp_mask, number_of_dex_registers, 0);
+  stream.BeginStackMapEntry(0, 66 * kPcAlign, 0x3, &sp_mask, number_of_dex_registers, 0);
   stream.AddDexRegisterEntry(Kind::kInRegister, 2);  // Short location.
   stream.AddDexRegisterEntry(Kind::kConstant, -2);   // Large location.
   stream.EndStackMapEntry();
@@ -446,22 +446,22 @@ TEST(StackMapTest, TestShareDexRegisterMap) {
   StackMap sm0 = ci.GetStackMapAt(0);
   DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm0);
   ASSERT_EQ(number_of_dex_registers, dex_registers0.size());
-  ASSERT_EQ(0, dex_registers0.GetMachineRegister(0));
-  ASSERT_EQ(-2, dex_registers0.GetConstant(1));
+  ASSERT_EQ(0, dex_registers0[0].GetMachineRegister());
+  ASSERT_EQ(-2, dex_registers0[1].GetConstant());
 
   // Verify second stack map.
   StackMap sm1 = ci.GetStackMapAt(1);
   DexRegisterMap dex_registers1 = ci.GetDexRegisterMapOf(sm1);
   ASSERT_EQ(number_of_dex_registers, dex_registers1.size());
-  ASSERT_EQ(0, dex_registers1.GetMachineRegister(0));
-  ASSERT_EQ(-2, dex_registers1.GetConstant(1));
+  ASSERT_EQ(0, dex_registers1[0].GetMachineRegister());
+  ASSERT_EQ(-2, dex_registers1[1].GetConstant());
 
   // Verify third stack map.
   StackMap sm2 = ci.GetStackMapAt(2);
   DexRegisterMap dex_registers2 = ci.GetDexRegisterMapOf(sm2);
   ASSERT_EQ(number_of_dex_registers, dex_registers2.size());
-  ASSERT_EQ(2, dex_registers2.GetMachineRegister(0));
-  ASSERT_EQ(-2, dex_registers2.GetConstant(1));
+  ASSERT_EQ(2, dex_registers2[0].GetMachineRegister());
+  ASSERT_EQ(-2, dex_registers2[1].GetConstant());
 
   // Verify dex register mask offsets.
   ASSERT_FALSE(sm1.HasDexRegisterMaskIndex());  // No delta.
@@ -528,7 +528,7 @@ TEST(StackMapTest, InlineTest) {
   sp_mask1.SetBit(4);
 
   // First stack map.
-  stream.BeginStackMapEntry(0, 64 * kPcAlign, 0x3, &sp_mask1, 2, 2);
+  stream.BeginStackMapEntry(0, 10 * kPcAlign, 0x3, &sp_mask1, 2, 2);
   stream.AddDexRegisterEntry(Kind::kInStack, 0);
   stream.AddDexRegisterEntry(Kind::kConstant, 4);
@@ -597,8 +597,8 @@ TEST(StackMapTest, InlineTest) {
     DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm0);
     ASSERT_EQ(2u, dex_registers0.size());
-    ASSERT_EQ(0, dex_registers0.GetStackOffsetInBytes(0));
-    ASSERT_EQ(4, dex_registers0.GetConstant(1));
+    ASSERT_EQ(0, dex_registers0[0].GetStackOffsetInBytes());
+    ASSERT_EQ(4, dex_registers0[1].GetConstant());
 
     InlineInfo if0_0 = ci.GetInlineInfoAtDepth(sm0, 0);
     InlineInfo if0_1 = ci.GetInlineInfoAtDepth(sm0, 1);
@@ -610,13 +610,13 @@ TEST(StackMapTest, InlineTest) {
     DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(0, sm0);
     ASSERT_EQ(1u, dex_registers1.size());
-    ASSERT_EQ(8, dex_registers1.GetStackOffsetInBytes(0));
+    ASSERT_EQ(8, dex_registers1[0].GetStackOffsetInBytes());
 
     DexRegisterMap dex_registers2 = ci.GetDexRegisterMapAtDepth(1, sm0);
     ASSERT_EQ(3u, dex_registers2.size());
-    ASSERT_EQ(16, dex_registers2.GetStackOffsetInBytes(0));
-    ASSERT_EQ(20, dex_registers2.GetConstant(1));
-    ASSERT_EQ(15, dex_registers2.GetMachineRegister(2));
+    ASSERT_EQ(16, dex_registers2[0].GetStackOffsetInBytes());
+    ASSERT_EQ(20, dex_registers2[1].GetConstant());
+    ASSERT_EQ(15, dex_registers2[2].GetMachineRegister());
   }
 
   {
@@ -625,8 +625,8 @@ TEST(StackMapTest, InlineTest) {
     DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm1);
     ASSERT_EQ(2u, dex_registers0.size());
-    ASSERT_EQ(56, dex_registers0.GetStackOffsetInBytes(0));
-    ASSERT_EQ(0, dex_registers0.GetConstant(1));
+    ASSERT_EQ(56, dex_registers0[0].GetStackOffsetInBytes());
+    ASSERT_EQ(0, dex_registers0[1].GetConstant());
 
     InlineInfo if1_0 = ci.GetInlineInfoAtDepth(sm1, 0);
     InlineInfo if1_1 = ci.GetInlineInfoAtDepth(sm1, 1);
@@ -641,13 +641,13 @@ TEST(StackMapTest, InlineTest) {
     DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(0, sm1);
     ASSERT_EQ(1u, dex_registers1.size());
-    ASSERT_EQ(12, dex_registers1.GetStackOffsetInBytes(0));
+    ASSERT_EQ(12, dex_registers1[0].GetStackOffsetInBytes());
 
     DexRegisterMap dex_registers2 = ci.GetDexRegisterMapAtDepth(1, sm1);
     ASSERT_EQ(3u, dex_registers2.size());
-    ASSERT_EQ(80, dex_registers2.GetStackOffsetInBytes(0));
-    ASSERT_EQ(10, dex_registers2.GetConstant(1));
-    ASSERT_EQ(5, dex_registers2.GetMachineRegister(2));
+    ASSERT_EQ(80, dex_registers2[0].GetStackOffsetInBytes());
+    ASSERT_EQ(10, dex_registers2[1].GetConstant());
+    ASSERT_EQ(5, dex_registers2[2].GetMachineRegister());
   }
 
   {
@@ -656,8 +656,8 @@ TEST(StackMapTest, InlineTest) {
     DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm2);
     ASSERT_EQ(2u, dex_registers0.size());
-    ASSERT_FALSE(dex_registers0.IsDexRegisterLive(0));
-    ASSERT_EQ(4, dex_registers0.GetConstant(1));
+    ASSERT_FALSE(dex_registers0[0].IsLive());
+    ASSERT_EQ(4, dex_registers0[1].GetConstant());
 
     ASSERT_FALSE(sm2.HasInlineInfo());
   }
@@ -667,8 +667,8 @@ TEST(StackMapTest, InlineTest) {
     DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm3);
     ASSERT_EQ(2u, dex_registers0.size());
-    ASSERT_EQ(56, dex_registers0.GetStackOffsetInBytes(0));
-    ASSERT_EQ(0, dex_registers0.GetConstant(1));
+    ASSERT_EQ(56, dex_registers0[0].GetStackOffsetInBytes());
+    ASSERT_EQ(0, dex_registers0[1].GetConstant());
 
     InlineInfo if2_0 = ci.GetInlineInfoAtDepth(sm3, 0);
     InlineInfo if2_1 = ci.GetInlineInfoAtDepth(sm3, 1);
@@ -683,12 +683,12 @@ TEST(StackMapTest, InlineTest) {
     DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(1, sm3);
     ASSERT_EQ(1u, dex_registers1.size());
-    ASSERT_EQ(2, dex_registers1.GetMachineRegister(0));
+    ASSERT_EQ(2, dex_registers1[0].GetMachineRegister());
 
     DexRegisterMap dex_registers2 = ci.GetDexRegisterMapAtDepth(2, sm3);
     ASSERT_EQ(2u, dex_registers2.size());
-    ASSERT_FALSE(dex_registers2.IsDexRegisterLive(0));
-    ASSERT_EQ(3, dex_registers2.GetMachineRegister(1));
+    ASSERT_FALSE(dex_registers2[0].IsLive());
+    ASSERT_EQ(3, dex_registers2[1].GetMachineRegister());
   }
 }