Don't use VIXL's temp registers in InstructionCodeGeneratorARM64::HandleFieldGet.
Before this CL, emitting a volatile field load with a large
offset could deplete the pool of VIXL ARM64 scratch registers
(IP0, IP1) in the concurrent collector (Baker read barrier)
configuration: the scratch register needed to materialize the
large offset and the temporary used by the read barrier marking
path were both drawn from that two-register pool. To avoid
this, we now request the marking temporary from the register
allocator instead of from VIXL's scratch pool.
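
For illustration, a minimal standalone VIXL sketch (not ART's
actual code path; the register uses and the helper scenario are
assumed for illustration) of how that two-register pool runs dry:

  // Minimal sketch, not ART code: VIXL's default AArch64 scratch
  // pool only holds IP0 (x16) and IP1 (x17).
  #include "aarch64/macro-assembler-aarch64.h"

  using namespace vixl::aarch64;

  int main() {
    MacroAssembler masm;
    UseScratchRegisterScope temps(&masm);
    // One scratch for the read barrier marking temporary...
    Register marking_temp = temps.AcquireW();
    // ...and one to materialize a large field offset for the
    // load-acquire.
    Register offset_temp = temps.AcquireX();
    // A further acquisition, e.g. from inside a macro-assembler
    // helper, would now abort because the pool is empty:
    // Register extra = temps.AcquireW();
    (void)marking_temp;
    (void)offset_temp;
    masm.FinalizeCode();
    return 0;
  }

Reserving the marking temporary through the register allocator,
as done in the diff below, keeps VIXL's scratch pool free for
address computation.
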
Test: m test-art-target-run-test-635-checker-arm64-volatile-load-cc
Bug: 34726333
Change-Id: Idf73a0306142c6133e259783aacaf7ad5401a2fd
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 9762ee8..e685d59 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1903,6 +1903,9 @@
LocationSummary::kNoCall);
if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
+ // We need a temporary register for the read barrier marking slow
+ // path in CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier.
+ locations->AddTemp(Location::RequiresRegister());
}
locations->SetInAt(0, Location::RequiresRegister());
if (Primitive::IsFloatingPointType(instruction->GetType())) {
@@ -1930,11 +1933,9 @@
if (field_type == Primitive::kPrimNot && kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
// Object FieldGet with Baker's read barrier case.
- MacroAssembler* masm = GetVIXLAssembler();
- UseScratchRegisterScope temps(masm);
// /* HeapReference<Object> */ out = *(base + offset)
Register base = RegisterFrom(base_loc, Primitive::kPrimNot);
- Register temp = temps.AcquireW();
+ Register temp = WRegisterFrom(locations->GetTemp(0));
// Note that potential implicit null checks are handled in this
// CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier call.
codegen_->GenerateFieldLoadWithBakerReadBarrier(