Revert^2 "Improve ArraySet codegen."
This reverts commit 0dda8c84938d6bb4ce5a1707e5e109ea187fc33d.
The original change had two issues that have been fixed.
First, with heap poisoning enabled, the null branch skipped
over the reference poisoning instructions that copy and poison
the value, thus storing whatever stale contents were left in
the source register.
Second, the change erroneously assumed that the slow path
performed only the assignability check, and therefore bound
the slow path exit label before the actual array store; since
the slow path performs the full store itself, the store was
needlessly re-done after returning from it.
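
To illustrate, a minimal plain-C++ sketch of the fixed fast-path
control flow; Poison() and the function shape are hypothetical
stand-ins, not the actual VIXL-emitted code:

    #include <cstdint>

    // Hypothetical stand-in for the heap reference poisoning transform.
    static uint32_t Poison(uint32_t ref) { return ~ref; }

    void ArraySetFastPath(uint32_t* slot, uint32_t value,
                          bool needs_type_check) {
      if (value != 0u && needs_type_check) {
        // Assignability check; on failure, the slow path does the
        // full store itself and resumes after the store below.
      }
      // do_store: the null branch lands here, *before* the poisoning
      // code, so the stored value is always freshly copied and
      // poisoned (issue one).
      uint32_t source = Poison(value);
      *slot = source;
      // Slow path exit label: bound here, *after* the store, so the
      // slow path's store is not re-done (issue two).
    }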
Change-Id: I9f380efa12aa807b4f566a932dbc9dae824fb25a
Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Test: aosp_taimen-userdebug boots.
Test: testrunner.py --target --optimizing
Test: Repeat the above with
ART_USE_READ_BARRIER=false ART_HEAP_POISONING=true
Bug: 32489401
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 7adfe5d..19d04c9 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -6177,13 +6177,11 @@
bool needs_write_barrier =
CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
- bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
+ bool needs_type_check = instruction->NeedsTypeCheck();
LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
instruction,
- may_need_runtime_call_for_type_check ?
- LocationSummary::kCallOnSlowPath :
- LocationSummary::kNoCall);
+ needs_type_check ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
@@ -6204,7 +6202,7 @@
vixl32::Register array = InputRegisterAt(instruction, 0);
Location index = locations->InAt(1);
DataType::Type value_type = instruction->GetComponentType();
- bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
+ bool needs_type_check = instruction->NeedsTypeCheck();
bool needs_write_barrier =
CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
uint32_t data_offset =
@@ -6256,8 +6254,7 @@
if (instruction->InputAt(2)->IsNullConstant()) {
// Just setting null.
if (index.IsConstant()) {
- size_t offset =
- (Int32ConstantFrom(index) << TIMES_4) + data_offset;
+ size_t offset = (Int32ConstantFrom(index) << TIMES_4) + data_offset;
GetAssembler()->StoreToOffset(kStoreWord, value, array, offset);
} else {
DCHECK(index.IsRegister()) << index;
@@ -6270,7 +6267,7 @@
// store instruction.
codegen_->MaybeRecordImplicitNullCheck(instruction);
DCHECK(!needs_write_barrier);
- DCHECK(!may_need_runtime_call_for_type_check);
+ DCHECK(!needs_type_check);
break;
}
@@ -6279,36 +6276,21 @@
vixl32::Register temp1 = RegisterFrom(temp1_loc);
Location temp2_loc = locations->GetTemp(1);
vixl32::Register temp2 = RegisterFrom(temp2_loc);
- uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
- uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
- uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
- vixl32::Label done;
- vixl32::Label* final_label = codegen_->GetFinalLabel(instruction, &done);
- SlowPathCodeARMVIXL* slow_path = nullptr;
- if (may_need_runtime_call_for_type_check) {
+ bool can_value_be_null = instruction->GetValueCanBeNull();
+ vixl32::Label do_store;
+ if (can_value_be_null) {
+ __ CompareAndBranchIfZero(value, &do_store, /* is_far_target= */ false);
+ }
+
+ SlowPathCodeARMVIXL* slow_path = nullptr;
+ if (needs_type_check) {
slow_path = new (codegen_->GetScopedAllocator()) ArraySetSlowPathARMVIXL(instruction);
codegen_->AddSlowPath(slow_path);
- if (instruction->GetValueCanBeNull()) {
- vixl32::Label non_zero;
- __ CompareAndBranchIfNonZero(value, &non_zero);
- if (index.IsConstant()) {
- size_t offset =
- (Int32ConstantFrom(index) << TIMES_4) + data_offset;
- GetAssembler()->StoreToOffset(kStoreWord, value, array, offset);
- } else {
- DCHECK(index.IsRegister()) << index;
- UseScratchRegisterScope temps(GetVIXLAssembler());
- vixl32::Register temp = temps.Acquire();
- __ Add(temp, array, data_offset);
- codegen_->StoreToShiftedRegOffset(value_type, value_loc, temp, RegisterFrom(index));
- }
- // TODO(VIXL): Use a scope to ensure we record the pc info immediately after the preceding
- // store instruction.
- codegen_->MaybeRecordImplicitNullCheck(instruction);
- __ B(final_label);
- __ Bind(&non_zero);
- }
+
+ const uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ const uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+ const uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
// Note that when read barriers are enabled, the type checks
// are performed without read barriers. This is fine, even in
@@ -6355,6 +6337,13 @@
}
}
+ codegen_->MarkGCCard(temp1, temp2, array, value, /* can_be_null= */ false);
+
+ if (can_value_be_null) {
+ DCHECK(do_store.IsReferenced());
+ __ Bind(&do_store);
+ }
+
vixl32::Register source = value;
if (kPoisonHeapReferences) {
// Note that in the case where `value` is a null reference,
@@ -6367,8 +6356,7 @@
}
if (index.IsConstant()) {
- size_t offset =
- (Int32ConstantFrom(index) << TIMES_4) + data_offset;
+ size_t offset = (Int32ConstantFrom(index) << TIMES_4) + data_offset;
GetAssembler()->StoreToOffset(kStoreWord, source, array, offset);
} else {
DCHECK(index.IsRegister()) << index;
@@ -6382,18 +6370,12 @@
RegisterFrom(index));
}
- if (!may_need_runtime_call_for_type_check) {
+ if (can_value_be_null || !needs_type_check) {
// TODO(VIXL): Ensure we record the pc position immediately after the preceding store
// instruction.
codegen_->MaybeRecordImplicitNullCheck(instruction);
}
- codegen_->MarkGCCard(temp1, temp2, array, value, instruction->GetValueCanBeNull());
-
- if (done.IsReferenced()) {
- __ Bind(&done);
- }
-
if (slow_path != nullptr) {
__ Bind(slow_path->GetExitLabel());
}
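
A note on the new `can_value_be_null || !needs_type_check` guard in
the last hunk; this is a reading of the diff, stated as an assumption
rather than something the commit message spells out:

    if (can_value_be_null || !needs_type_check) {
      // The store can be the first dereference of `array`: with
      // !needs_type_check there is no earlier class load, and with
      // can_value_be_null the null branch skips the type check's
      // load of array->klass_. Record the store's pc so a fault
      // here serves as the implicit null check.
      codegen_->MaybeRecordImplicitNullCheck(instruction);
    }
    // Otherwise the type check's unconditional load of array->klass_
    // already dereferenced `array`, presumably with the pc recorded
    // there.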