ARM: VIXL32: Fix crash in Exchange for stack slots.
In ParallelMoveResolverARMVIXL::Exchange(int mem1, int mem2) a scratch
general-purpose register was used without being spilled (as is done in
StoreToOffset), which left no scratch register available for a VLDR
with a big offset. The routine now uses two scratch S-registers instead.
Test: ART_USE_VIXL_ARM_BACKEND=true m test-art-host
Test: ART_USE_VIXL_ARM_BACKEND=true m test-art-target
Change-Id: I0416a69e281d09a04dd1689efa5a8c1994c82638
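
For reference, a minimal sketch of the fixed routine (not the exact code in
this patch; it assumes VIXL32's UseScratchRegisterScope/AcquireS scratch-pool
API and ART's usual __ shorthand for the VIXL macro assembler):

    void ParallelMoveResolverARMVIXL::Exchange(int mem1, int mem2) {
      // Use two scratch S-registers so no core scratch register is consumed;
      // one must remain available for the macro assembler to materialize
      // offsets that do not fit the VLDR immediate encoding.
      UseScratchRegisterScope temps(GetAssembler()->GetVIXLAssembler());
      vixl32::SRegister temp_1 = temps.AcquireS();
      vixl32::SRegister temp_2 = temps.AcquireS();

      __ Vldr(temp_1, MemOperand(sp, mem1));  // Read first slot.
      __ Vldr(temp_2, MemOperand(sp, mem2));  // Read second slot.
      __ Vstr(temp_1, MemOperand(sp, mem2));  // Write first value to second slot.
      __ Vstr(temp_2, MemOperand(sp, mem1));  // Write second value to first slot.
    }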
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index ac83bd9..879b4ce 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -1041,6 +1041,31 @@
}
}
+#ifdef ART_ENABLE_CODEGEN_arm
+TEST_F(CodegenTest, ARMVIXLParallelMoveResolver) {
+  std::unique_ptr<const ArmInstructionSetFeatures> features(
+      ArmInstructionSetFeatures::FromCppDefines());
+  ArenaPool pool;
+  ArenaAllocator allocator(&pool);
+  HGraph* graph = CreateGraph(&allocator);
+  arm::CodeGeneratorARMVIXL codegen(graph, *features.get(), CompilerOptions());
+
+  codegen.Initialize();
+
+  // This results in a call to EmitSwap -> ParallelMoveResolverARMVIXL::Exchange(int mem1,
+  // int mem2), which was faulty before the fix: GPR and FP scratch registers were used as
+  // temps, but the GPR scratch register is also needed for big stack offsets that do not fit
+  // the LDR encoding. The following code is therefore a regression test for that situation.
+  HParallelMove* move = new (graph->GetArena()) HParallelMove(graph->GetArena());
+  move->AddMove(Location::StackSlot(0), Location::StackSlot(8192), Primitive::kPrimInt, nullptr);
+  move->AddMove(Location::StackSlot(8192), Location::StackSlot(0), Primitive::kPrimInt, nullptr);
+  codegen.GetMoveResolver()->EmitNativeCode(move);
+
+  InternalCodeAllocator code_allocator;
+  codegen.Finalize(&code_allocator);
+}
+#endif
+
#ifdef ART_ENABLE_CODEGEN_mips
TEST_F(CodegenTest, MipsClobberRA) {
std::unique_ptr<const MipsInstructionSetFeatures> features_mips(