 compiler/optimizing/code_generator_arm_vixl.cc | 143
 compiler/optimizing/code_generator_arm_vixl.h  |   6
 compiler/optimizing/optimizing_compiler.cc     |  11
 test/Android.arm_vixl.mk                       |  14
 4 files changed, 159 insertions(+), 15 deletions(-)
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index a31adbf4ac..1f814abf06 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -4161,7 +4161,14 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) {
       vixl32::Register temp = temps.Acquire();
 
       if (has_intermediate_address) {
-        TODO_VIXL32(FATAL);
+        // We do not need to compute the intermediate address from the array: the
+        // input instruction has done it already. See the comment in
+        // `TryExtractArrayAccessAddress()`.
+        if (kIsDebugBuild) {
+          HIntermediateAddress* tmp = array_instr->AsIntermediateAddress();
+          DCHECK_EQ(tmp->GetOffset()->AsIntConstant()->GetValueAsUint64(), data_offset);
+        }
+        temp = obj;
       } else {
         __ Add(temp, obj, data_offset);
       }
@@ -4206,7 +4213,14 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) {
       vixl32::Register temp = temps.Acquire();
 
       if (has_intermediate_address) {
-        TODO_VIXL32(FATAL);
+        // We do not need to compute the intermediate address from the array: the
+        // input instruction has done it already. See the comment in
+        // `TryExtractArrayAccessAddress()`.
+        if (kIsDebugBuild) {
+          HIntermediateAddress* tmp = array_instr->AsIntermediateAddress();
+          DCHECK_EQ(tmp->GetOffset()->AsIntConstant()->GetValueAsUint64(), data_offset);
+        }
+        temp = obj;
       } else {
         __ Add(temp, obj, data_offset);
       }
@@ -4334,7 +4348,14 @@ void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) {
       vixl32::Register temp = temps.Acquire();
 
       if (has_intermediate_address) {
-        TODO_VIXL32(FATAL);
+        // We do not need to compute the intermediate address from the array: the
+        // input instruction has done it already. See the comment in
+        // `TryExtractArrayAccessAddress()`.
+        if (kIsDebugBuild) {
+          HIntermediateAddress* tmp = array_instr->AsIntermediateAddress();
+          DCHECK(tmp->GetOffset()->AsIntConstant()->GetValueAsUint64() == data_offset);
+        }
+        temp = array;
       } else {
         __ Add(temp, array, data_offset);
       }
@@ -4553,6 +4574,32 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayLength(HArrayLength* instruction
   }
 }
 
+void LocationsBuilderARMVIXL::VisitIntermediateAddress(HIntermediateAddress* instruction) {
+  // The read barrier instrumentation does not support the HIntermediateAddress instruction yet.
+  DCHECK(!kEmitCompilerReadBarrier);
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RegisterOrConstant(instruction->GetOffset()));
+  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitIntermediateAddress(HIntermediateAddress* instruction) {
+  vixl32::Register out = OutputRegister(instruction);
+  vixl32::Register first = InputRegisterAt(instruction, 0);
+  Location second = instruction->GetLocations()->InAt(1);
+
+  // The read barrier instrumentation does not support the HIntermediateAddress instruction yet.
+  DCHECK(!kEmitCompilerReadBarrier);
+
+  if (second.IsRegister()) {
+    __ Add(out, first, RegisterFrom(second));
+  } else {
+    __ Add(out, first, second.GetConstant()->AsIntConstant()->GetValue());
+  }
+}
+
 void LocationsBuilderARMVIXL::VisitBoundsCheck(HBoundsCheck* instruction) {
   RegisterSet caller_saves = RegisterSet::Empty();
   InvokeRuntimeCallingConventionARMVIXL calling_convention;
@@ -5488,6 +5535,70 @@ void InstructionCodeGeneratorARMVIXL::VisitXor(HXor* instruction) {
   HandleBitwiseOperation(instruction);
 }
 
+void LocationsBuilderARMVIXL::VisitBitwiseNegatedRight(HBitwiseNegatedRight* instruction) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+  DCHECK(instruction->GetResultType() == Primitive::kPrimInt
+         || instruction->GetResultType() == Primitive::kPrimLong);
+
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitBitwiseNegatedRight(HBitwiseNegatedRight* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  Location first = locations->InAt(0);
+  Location second = locations->InAt(1);
+  Location out = locations->Out();
+
+  if (instruction->GetResultType() == Primitive::kPrimInt) {
+    vixl32::Register first_reg = RegisterFrom(first);
+    vixl32::Register second_reg = RegisterFrom(second);
+    vixl32::Register out_reg = RegisterFrom(out);
+
+    switch (instruction->GetOpKind()) {
+      case HInstruction::kAnd:
+        __ Bic(out_reg, first_reg, second_reg);
+        break;
+      case HInstruction::kOr:
+        __ Orn(out_reg, first_reg, second_reg);
+        break;
+      // There is no EON on arm.
+      case HInstruction::kXor:
+      default:
+        LOG(FATAL) << "Unexpected instruction " << instruction->DebugName();
+        UNREACHABLE();
+    }
+    return;
+
+  } else {
+    DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong);
+    vixl32::Register first_low = LowRegisterFrom(first);
+    vixl32::Register first_high = HighRegisterFrom(first);
+    vixl32::Register second_low = LowRegisterFrom(second);
+    vixl32::Register second_high = HighRegisterFrom(second);
+    vixl32::Register out_low = LowRegisterFrom(out);
+    vixl32::Register out_high = HighRegisterFrom(out);
+
+    switch (instruction->GetOpKind()) {
+      case HInstruction::kAnd:
+        __ Bic(out_low, first_low, second_low);
+        __ Bic(out_high, first_high, second_high);
+        break;
+      case HInstruction::kOr:
+        __ Orn(out_low, first_low, second_low);
+        __ Orn(out_high, first_high, second_high);
+        break;
+      // There is no EON on arm.
+      case HInstruction::kXor:
+      default:
+        LOG(FATAL) << "Unexpected instruction " << instruction->DebugName();
+        UNREACHABLE();
+    }
+  }
+}
+
 // TODO(VIXL): Remove optimizations in the helper when they are implemented in vixl.
 void InstructionCodeGeneratorARMVIXL::GenerateAndConst(vixl32::Register out,
                                                        vixl32::Register first,
@@ -5855,6 +5966,32 @@ void CodeGeneratorARMVIXL::GenerateVirtualCall(HInvokeVirtual* invoke, Location
   __ Blx(lr);
 }
 
+void LocationsBuilderARMVIXL::VisitMultiplyAccumulate(HMultiplyAccumulate* instr) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(instr, LocationSummary::kNoCall);
+  locations->SetInAt(HMultiplyAccumulate::kInputAccumulatorIndex,
+                     Location::RequiresRegister());
+  locations->SetInAt(HMultiplyAccumulate::kInputMulLeftIndex, Location::RequiresRegister());
+  locations->SetInAt(HMultiplyAccumulate::kInputMulRightIndex, Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitMultiplyAccumulate(HMultiplyAccumulate* instr) {
+  vixl32::Register res = OutputRegister(instr);
+  vixl32::Register accumulator =
+      InputRegisterAt(instr, HMultiplyAccumulate::kInputAccumulatorIndex);
+  vixl32::Register mul_left =
+      InputRegisterAt(instr, HMultiplyAccumulate::kInputMulLeftIndex);
+  vixl32::Register mul_right =
+      InputRegisterAt(instr, HMultiplyAccumulate::kInputMulRightIndex);
+
+  if (instr->GetOpKind() == HInstruction::kAdd) {
+    __ Mla(res, mul_left, mul_right, accumulator);
+  } else {
+    __ Mls(res, mul_left, mul_right, accumulator);
+  }
+}
+
 void LocationsBuilderARMVIXL::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
   // Nothing to do, this should be removed during prepare for register allocator.
   LOG(FATAL) << "Unreachable";
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index ccd866c367..e8bc2a9b88 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -112,6 +112,7 @@ class LoadClassSlowPathARMVIXL;
   M(ArraySet) \
   M(Below) \
   M(BelowOrEqual) \
+  M(BitwiseNegatedRight) \
   M(BooleanNot) \
   M(BoundsCheck) \
   M(BoundType) \
@@ -136,6 +137,7 @@ class LoadClassSlowPathARMVIXL;
   M(InstanceFieldSet) \
   M(InstanceOf) \
   M(IntConstant) \
+  M(IntermediateAddress) \
   M(InvokeInterface) \
   M(InvokeStaticOrDirect) \
   M(InvokeUnresolved) \
@@ -149,6 +151,7 @@ class LoadClassSlowPathARMVIXL;
   M(MemoryBarrier) \
   M(MonitorOperation) \
   M(Mul) \
+  M(MultiplyAccumulate) \
   M(NativeDebugInfo) \
   M(Neg) \
   M(NewArray) \
@@ -186,9 +189,6 @@ class LoadClassSlowPathARMVIXL;
 // TODO: Remove once the VIXL32 backend is implemented completely.
 #define FOR_EACH_UNIMPLEMENTED_INSTRUCTION(M) \
   M(ArmDexCacheArraysBase) \
-  M(BitwiseNegatedRight) \
-  M(IntermediateAddress) \
-  M(MultiplyAccumulate) \
 
 class CodeGeneratorARMVIXL;
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 7a930cce71..499514de97 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -624,17 +624,14 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set,
   UNUSED(codegen);  // To avoid compilation error when compiling for svelte
   OptimizingCompilerStats* stats = compilation_stats_.get();
   ArenaAllocator* arena = graph->GetArena();
-#ifdef ART_USE_VIXL_ARM_BACKEND
-  UNUSED(arena);
-  UNUSED(pass_observer);
-  UNUSED(stats);
-#endif
   switch (instruction_set) {
-#if defined(ART_ENABLE_CODEGEN_arm) && !defined(ART_USE_VIXL_ARM_BACKEND)
+#if defined(ART_ENABLE_CODEGEN_arm)
     case kThumb2:
     case kArm: {
+#ifndef ART_USE_VIXL_ARM_BACKEND
       arm::DexCacheArrayFixups* fixups =
           new (arena) arm::DexCacheArrayFixups(graph, codegen, stats);
+#endif
       arm::InstructionSimplifierArm* simplifier =
           new (arena) arm::InstructionSimplifierArm(graph, stats);
       SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
@@ -643,7 +640,9 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set,
         simplifier,
         side_effects,
         gvn,
+#ifndef ART_USE_VIXL_ARM_BACKEND
         fixups
+#endif
       };
       RunOptimizations(arm_optimizations, arraysize(arm_optimizations), pass_observer);
       break;
diff --git a/test/Android.arm_vixl.mk b/test/Android.arm_vixl.mk
index 8ca4168563..21b31b40b7 100644
--- a/test/Android.arm_vixl.mk
+++ b/test/Android.arm_vixl.mk
@@ -19,25 +19,33 @@
 TEST_ART_BROKEN_OPTIMIZING_ARM_VIXL_RUN_TESTS := \
   003-omnibus-opcodes \
   020-string \
   021-string2 \
+  042-new-instance \
   044-proxy \
+  080-oom-throw \
   082-inline-execute \
   096-array-copy-concurrent-gc \
+  099-vmdebug \
   100-reflect2 \
   103-string-append \
+  114-ParallelGC \
   122-npe \
   129-ThreadGetId \
   137-cfi \
+  144-static-field-sigquit \
+  412-new-array \
   439-npe \
+  450-checker-types \
   488-checker-inline-recursive-calls \
+  515-dce-dominator \
   520-equivalent-phi \
   525-checker-arrays-fields1 \
   525-checker-arrays-fields2 \
   527-checker-array-access-split \
   538-checker-embed-constants \
-  550-checker-multiply-accumulate \
   552-checker-sharpening \
   562-checker-no-intermediate \
-  564-checker-negbitwise \
   570-checker-osr \
-  602-deoptimizeable
+  602-deoptimizeable \
+  700-LoadArgRegs \
+  800-smali \