author     2019-09-09 14:52:12 +0100
committer  2019-10-10 13:06:08 +0100
commit     98416bf06592493ee6fde039af5eaa5efab73acc (patch)
tree       a0052ec5364ce1068639a9b7d7355683eb691371 /compiler/optimizing
parent     63b0c26aae3e7237166dd781eb7a15fbc7c091c2 (diff)
Fix uses of MaybeRecordImplicitNullCheck without special scopes
MaybeRecordImplicitNullCheck uses CodeGenerator::RecordPcInfo() and requires an
exact PC. However, on ARM32/ARM64, when CodeGenerator::RecordPcInfo() is called
outside the VIXL special scopes (EmissionCheckScope, ExactAssemblyScope) there
is no guarantee of an exact PC: without those scopes VIXL may
emit veneer/literal pools, shifting the PC.
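
For illustration, a sketch of the fragile pattern (simplified from the ARM32
HandleFieldGet code removed in the diff below; the exact pool emission points
are internal to VIXL's MacroAssembler):

    // No special scope: the macro assembler may emit a veneer/literal pool
    // while expanding the load helper, so the position recorded afterwards
    // is not guaranteed to sit right after the faulting load instruction.
    GetAssembler()->LoadFromOffset(kLoadWord, RegisterFrom(out), base, offset);
    codegen_->MaybeRecordImplicitNullCheck(instruction);  // PC may be inexact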
The ARM32 code generator calls MaybeRecordImplicitNullCheck in several places
without these special scopes.
This CL adds the missing special scopes in the ARM32/ARM64 code generators.
It also changes the API to prevent such cases (the fixed pattern is sketched
after this list):
1. A variant of CodeGenerator::RecordPcInfo that takes native_pc as a
parameter is added. The old variant (which obtains the PC from
Assembler::CodePosition) is kept, with documentation noting that
Assembler::CodePosition is target-dependent and might be imprecise.
2. CodeGenerator::MaybeRecordImplicitNullCheck is made virtual, and the
ARM32/ARM64 code generators now check that MaybeRecordImplicitNullCheck
is invoked within VIXL special scopes.
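
A minimal sketch of the fixed pattern, mirroring the shape used throughout the
diff below (the scope size is doubled to 2 * kMaxMacroInstructionSizeInBytes
where two macro instructions are emitted):

    {
      // Block pool emission between the load and the PC recording.
      EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
      GetAssembler()->LoadFromOffset(kLoadWord, RegisterFrom(out), base, offset);
      codegen_->MaybeRecordImplicitNullCheck(instruction);  // PC is exact here
    }

The virtual overrides then CHECK ArePoolsBlocked(), so a call outside such a
scope fails fast.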
Test: test.py --host --optimizing --jit --gtest
Test: test.py --target --optimizing --jit
Test: run-gtests.sh
Change-Id: Ic66c16e7bdf4751cbc19a9de05846fba005b7f55
Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/code_generator.cc          |  13
-rw-r--r--  compiler/optimizing/code_generator.h           |  20
-rw-r--r--  compiler/optimizing/code_generator_arm64.h     |   9
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc | 200
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.h  |   9
5 files changed, 160 insertions, 91 deletions
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 6d3a5c6a73..bef7169da1 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -1116,6 +1116,14 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction,
                                  uint32_t dex_pc,
                                  SlowPathCode* slow_path,
                                  bool native_debug_info) {
+  RecordPcInfo(instruction, dex_pc, GetAssembler()->CodePosition(), slow_path, native_debug_info);
+}
+
+void CodeGenerator::RecordPcInfo(HInstruction* instruction,
+                                 uint32_t dex_pc,
+                                 uint32_t native_pc,
+                                 SlowPathCode* slow_path,
+                                 bool native_debug_info) {
   if (instruction != nullptr) {
     // The code generated for some type conversions
     // may call the runtime, thus normally requiring a subsequent
@@ -1139,9 +1147,6 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction,
     }
   }
 
-  // Collect PC infos for the mapping table.
-  uint32_t native_pc = GetAssembler()->CodePosition();
-
   StackMapStream* stack_map_stream = GetStackMapStream();
   if (instruction == nullptr) {
     // For stack overflow checks and native-debug-info entries without dex register
@@ -1493,7 +1498,7 @@ bool CodeGenerator::CanMoveNullCheckToUser(HNullCheck* null_check) {
 void CodeGenerator::MaybeRecordImplicitNullCheck(HInstruction* instr) {
   HNullCheck* null_check = instr->GetImplicitNullCheck();
   if (null_check != nullptr) {
-    RecordPcInfo(null_check, null_check->GetDexPc());
+    RecordPcInfo(null_check, null_check->GetDexPc(), GetAssembler()->CodePosition());
   }
 }
 
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 917d97de1b..9e3e454f3d 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -331,20 +331,36 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
     return GetFrameSize() - FrameEntrySpillSize() - kShouldDeoptimizeFlagSize;
   }
 
-  // Record native to dex mapping for a suspend point.  Required by runtime.
+  // Record native to dex mapping for a suspend point. Required by runtime.
   void RecordPcInfo(HInstruction* instruction,
                     uint32_t dex_pc,
+                    uint32_t native_pc,
                     SlowPathCode* slow_path = nullptr,
                     bool native_debug_info = false);
+
+  // Record native to dex mapping for a suspend point.
+  // The native_pc is used from Assembler::CodePosition.
+  //
+  // Note: As Assembler::CodePosition is target dependent, it does not guarantee the exact native_pc
+  // for the instruction. If the exact native_pc is required it must be provided explicitly.
+  void RecordPcInfo(HInstruction* instruction,
+                    uint32_t dex_pc,
+                    SlowPathCode* slow_path = nullptr,
+                    bool native_debug_info = false);
+
   // Check whether we have already recorded mapping at this PC.
   bool HasStackMapAtCurrentPc();
 
+  // Record extra stack maps if we support native debugging.
+  //
+  // ARM specific behaviour: The recorded native PC might be a branch over pools to instructions
+  // corresponding the dex PC.
   void MaybeRecordNativeDebugInfo(HInstruction* instruction,
                                   uint32_t dex_pc,
                                   SlowPathCode* slow_path = nullptr);
 
   bool CanMoveNullCheckToUser(HNullCheck* null_check);
-  void MaybeRecordImplicitNullCheck(HInstruction* instruction);
+  virtual void MaybeRecordImplicitNullCheck(HInstruction* instruction);
   LocationSummary* CreateThrowingSlowPathLocations(
       HInstruction* instruction, RegisterSet caller_saves = RegisterSet::Empty());
   void GenerateNullCheck(HNullCheck* null_check);
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index e477f7cf63..1a9b7006dc 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -777,6 +777,15 @@ class CodeGeneratorARM64 : public CodeGenerator {
   void GenerateImplicitNullCheck(HNullCheck* instruction) override;
   void GenerateExplicitNullCheck(HNullCheck* instruction) override;
 
+  void MaybeRecordImplicitNullCheck(HInstruction* instr) final {
+    // The function must be only called within special scopes
+    // (EmissionCheckScope, ExactAssemblyScope) which prevent generation of
+    // veneer/literal pools by VIXL assembler.
+    CHECK_EQ(GetVIXLAssembler()->ArePoolsBlocked(), true)
+        << "The function must only be called within EmissionCheckScope or ExactAssemblyScope";
+    CodeGenerator::MaybeRecordImplicitNullCheck(instr);
+  }
+
  private:
   // Encoding of thunk type and data for link-time generated thunks for Baker read barriers.
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 4cae49709f..1c69dd685b 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -68,6 +68,7 @@ using helpers::RegisterFrom;
 using helpers::SRegisterFrom;
 using helpers::Uint64ConstantFrom;
 
+using vixl::EmissionCheckScope;
 using vixl::ExactAssemblyScope;
 using vixl::CodeBufferCheckScope;
 
@@ -5443,24 +5444,29 @@ void InstructionCodeGeneratorARMVIXL::HandleFieldSet(HInstruction* instruction,
     case DataType::Type::kUint16:
     case DataType::Type::kInt16:
    case DataType::Type::kInt32: {
+      // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted.
+      EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
       StoreOperandType operand_type = GetStoreOperandType(field_type);
       GetAssembler()->StoreToOffset(operand_type, RegisterFrom(value), base, offset);
+      codegen_->MaybeRecordImplicitNullCheck(instruction);
       break;
     }
 
     case DataType::Type::kReference: {
+      vixl32::Register value_reg = RegisterFrom(value);
       if (kPoisonHeapReferences && needs_write_barrier) {
         // Note that in the case where `value` is a null reference,
         // we do not enter this block, as a null reference does not
         // need poisoning.
         DCHECK_EQ(field_type, DataType::Type::kReference);
-        vixl32::Register temp = RegisterFrom(locations->GetTemp(0));
-        __ Mov(temp, RegisterFrom(value));
-        GetAssembler()->PoisonHeapReference(temp);
-        GetAssembler()->StoreToOffset(kStoreWord, temp, base, offset);
-      } else {
-        GetAssembler()->StoreToOffset(kStoreWord, RegisterFrom(value), base, offset);
+        value_reg = RegisterFrom(locations->GetTemp(0));
+        __ Mov(value_reg, RegisterFrom(value));
+        GetAssembler()->PoisonHeapReference(value_reg);
       }
+      // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted.
+      EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
+      GetAssembler()->StoreToOffset(kStoreWord, value_reg, base, offset);
+      codegen_->MaybeRecordImplicitNullCheck(instruction);
       break;
     }
 
@@ -5474,6 +5480,8 @@ void InstructionCodeGeneratorARMVIXL::HandleFieldSet(HInstruction* instruction,
                                 RegisterFrom(locations->GetTemp(1)),
                                 instruction);
       } else {
+        // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted.
+        EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
         GetAssembler()->StoreToOffset(kStoreWordPair, LowRegisterFrom(value), base, offset);
         codegen_->MaybeRecordImplicitNullCheck(instruction);
       }
@@ -5481,7 +5489,10 @@ void InstructionCodeGeneratorARMVIXL::HandleFieldSet(HInstruction* instruction,
     }
 
     case DataType::Type::kFloat32: {
+      // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted.
+      EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
       GetAssembler()->StoreSToOffset(SRegisterFrom(value), base, offset);
+      codegen_->MaybeRecordImplicitNullCheck(instruction);
       break;
     }
 
@@ -5501,6 +5512,8 @@ void InstructionCodeGeneratorARMVIXL::HandleFieldSet(HInstruction* instruction,
                                 RegisterFrom(locations->GetTemp(3)),
                                 instruction);
       } else {
+        // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted.
+        EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
         GetAssembler()->StoreDToOffset(value_reg, base, offset);
         codegen_->MaybeRecordImplicitNullCheck(instruction);
       }
@@ -5514,16 +5527,6 @@ void InstructionCodeGeneratorARMVIXL::HandleFieldSet(HInstruction* instruction,
       UNREACHABLE();
   }
 
-  // Longs and doubles are handled in the switch.
-  if (field_type != DataType::Type::kInt64 && field_type != DataType::Type::kFloat64) {
-    // TODO(VIXL): Here and for other calls to `MaybeRecordImplicitNullCheck` in this method, we
-    // should use a scope and the assembler to emit the store instruction to guarantee that we
-    // record the pc at the correct position. But the `Assembler` does not automatically handle
-    // unencodable offsets. Practically, everything is fine because the helper and VIXL, at the time
-    // of writing, do generate the store instruction last.
-    codegen_->MaybeRecordImplicitNullCheck(instruction);
-  }
-
   if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
     vixl32::Register temp = RegisterFrom(locations->GetTemp(0));
     vixl32::Register card = RegisterFrom(locations->GetTemp(1));
@@ -5686,8 +5689,11 @@ void InstructionCodeGeneratorARMVIXL::HandleFieldGet(HInstruction* instruction,
     case DataType::Type::kUint16:
     case DataType::Type::kInt16:
     case DataType::Type::kInt32: {
+      // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+      EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
       LoadOperandType operand_type = GetLoadOperandType(load_type);
       GetAssembler()->LoadFromOffset(operand_type, RegisterFrom(out), base, offset);
+      codegen_->MaybeRecordImplicitNullCheck(instruction);
       break;
     }
 
@@ -5703,8 +5709,12 @@ void InstructionCodeGeneratorARMVIXL::HandleFieldGet(HInstruction* instruction,
           codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
         }
       } else {
-        GetAssembler()->LoadFromOffset(kLoadWord, RegisterFrom(out), base, offset);
-        codegen_->MaybeRecordImplicitNullCheck(instruction);
+        {
+          // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+          EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
+          GetAssembler()->LoadFromOffset(kLoadWord, RegisterFrom(out), base, offset);
+          codegen_->MaybeRecordImplicitNullCheck(instruction);
+        }
         if (is_volatile) {
           codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
         }
@@ -5716,26 +5726,34 @@ void InstructionCodeGeneratorARMVIXL::HandleFieldGet(HInstruction* instruction,
       break;
     }
 
-    case DataType::Type::kInt64:
+    case DataType::Type::kInt64: {
+      // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+      EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
       if (is_volatile && !atomic_ldrd_strd) {
         GenerateWideAtomicLoad(base, offset, LowRegisterFrom(out), HighRegisterFrom(out));
       } else {
         GetAssembler()->LoadFromOffset(kLoadWordPair, LowRegisterFrom(out), base, offset);
       }
+      codegen_->MaybeRecordImplicitNullCheck(instruction);
       break;
+    }
 
-    case DataType::Type::kFloat32:
+    case DataType::Type::kFloat32: {
+      // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+      EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
       GetAssembler()->LoadSFromOffset(SRegisterFrom(out), base, offset);
+      codegen_->MaybeRecordImplicitNullCheck(instruction);
       break;
+    }
 
     case DataType::Type::kFloat64: {
+      // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+      EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
      vixl32::DRegister out_dreg = DRegisterFrom(out);
       if (is_volatile && !atomic_ldrd_strd) {
         vixl32::Register lo = RegisterFrom(locations->GetTemp(0));
         vixl32::Register hi = RegisterFrom(locations->GetTemp(1));
         GenerateWideAtomicLoad(base, offset, lo, hi);
-        // TODO(VIXL): Do we need to be immediately after the ldrexd instruction? If so we need a
-        // scope.
         codegen_->MaybeRecordImplicitNullCheck(instruction);
         __ Vmov(out_dreg, lo, hi);
       } else {
@@ -5752,19 +5770,6 @@ void InstructionCodeGeneratorARMVIXL::HandleFieldGet(HInstruction* instruction,
       UNREACHABLE();
   }
 
-  if (load_type == DataType::Type::kReference || load_type == DataType::Type::kFloat64) {
-    // Potential implicit null checks, in the case of reference or
-    // double fields, are handled in the previous switch statement.
-  } else {
-    // Address cases other than reference and double that may require an implicit null check.
-    // TODO(VIXL): Here and for other calls to `MaybeRecordImplicitNullCheck` in this method, we
-    // should use a scope and the assembler to emit the load instruction to guarantee that we
-    // record the pc at the correct position. But the `Assembler` does not automatically handle
-    // unencodable offsets. Practically, everything is fine because the helper and VIXL, at the time
-    // of writing, do generate the store instruction last.
-    codegen_->MaybeRecordImplicitNullCheck(instruction);
-  }
-
   if (is_volatile) {
     if (load_type == DataType::Type::kReference) {
       // Memory barriers, in the case of references, are also handled
@@ -6052,6 +6057,8 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) {
     if (maybe_compressed_char_at) {
       length = RegisterFrom(locations->GetTemp(0));
       uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
+      // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+      EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
      GetAssembler()->LoadFromOffset(kLoadWord, length, obj, count_offset);
       codegen_->MaybeRecordImplicitNullCheck(instruction);
     }
@@ -6080,8 +6087,11 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) {
       } else {
         uint32_t full_offset =
             data_offset + (const_index << DataType::SizeShift(type));
+        // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+        EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
         LoadOperandType load_type = GetLoadOperandType(type);
         GetAssembler()->LoadFromOffset(load_type, RegisterFrom(out_loc), obj, full_offset);
+        codegen_->MaybeRecordImplicitNullCheck(instruction);
       }
     } else {
       UseScratchRegisterScope temps(GetVIXLAssembler());
@@ -6114,7 +6124,10 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) {
           __ Bind(&done);
         }
       } else {
+        // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+        EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
         codegen_->LoadFromShiftedRegOffset(type, out_loc, temp, RegisterFrom(index));
+        codegen_->MaybeRecordImplicitNullCheck(instruction);
       }
     }
     break;
@@ -6154,15 +6167,13 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) {
      } else {
         vixl32::Register out = OutputRegister(instruction);
         if (index.IsConstant()) {
-          size_t offset =
-              (Int32ConstantFrom(index) << TIMES_4) + data_offset;
-          GetAssembler()->LoadFromOffset(kLoadWord, out, obj, offset);
-          // TODO(VIXL): Here and for other calls to `MaybeRecordImplicitNullCheck` in this method,
-          // we should use a scope and the assembler to emit the load instruction to guarantee that
-          // we record the pc at the correct position. But the `Assembler` does not automatically
-          // handle unencodable offsets. Practically, everything is fine because the helper and
-          // VIXL, at the time of writing, do generate the store instruction last.
-          codegen_->MaybeRecordImplicitNullCheck(instruction);
+          size_t offset = (Int32ConstantFrom(index) << TIMES_4) + data_offset;
+          {
+            // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+            EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
+            GetAssembler()->LoadFromOffset(kLoadWord, out, obj, offset);
+            codegen_->MaybeRecordImplicitNullCheck(instruction);
+          }
           // If read barriers are enabled, emit read barriers other than
           // Baker's using a slow path (and also unpoison the loaded
           // reference, if heap poisoning is enabled).
@@ -6183,12 +6194,13 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) {
           } else {
             __ Add(temp, obj, data_offset);
           }
-          codegen_->LoadFromShiftedRegOffset(type, out_loc, temp, RegisterFrom(index));
-          temps.Close();
-          // TODO(VIXL): Use a scope to ensure that we record the pc position immediately after the
-          // load instruction. Practically, everything is fine because the helper and VIXL, at the
-          // time of writing, do generate the store instruction last.
-          codegen_->MaybeRecordImplicitNullCheck(instruction);
+          {
+            // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+            EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
+            codegen_->LoadFromShiftedRegOffset(type, out_loc, temp, RegisterFrom(index));
+            temps.Close();
+            codegen_->MaybeRecordImplicitNullCheck(instruction);
+          }
           // If read barriers are enabled, emit read barriers other than
           // Baker's using a slow path (and also unpoison the loaded
           // reference, if heap poisoning is enabled).
@@ -6200,6 +6212,9 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) {
     }
 
     case DataType::Type::kInt64: {
+      // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+      // As two macro instructions can be emitted the max size is doubled.
+      EmissionCheckScope guard(GetVIXLAssembler(), 2 * kMaxMacroInstructionSizeInBytes);
       if (index.IsConstant()) {
         size_t offset =
             (Int32ConstantFrom(index) << TIMES_8) + data_offset;
@@ -6210,10 +6225,14 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) {
         __ Add(temp, obj, Operand(RegisterFrom(index), vixl32::LSL, TIMES_8));
         GetAssembler()->LoadFromOffset(kLoadWordPair, LowRegisterFrom(out_loc), temp, data_offset);
       }
+      codegen_->MaybeRecordImplicitNullCheck(instruction);
       break;
     }
 
     case DataType::Type::kFloat32: {
+      // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+      // As two macro instructions can be emitted the max size is doubled.
+      EmissionCheckScope guard(GetVIXLAssembler(), 2 * kMaxMacroInstructionSizeInBytes);
       vixl32::SRegister out = SRegisterFrom(out_loc);
       if (index.IsConstant()) {
         size_t offset = (Int32ConstantFrom(index) << TIMES_4) + data_offset;
@@ -6224,10 +6243,14 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) {
         __ Add(temp, obj, Operand(RegisterFrom(index), vixl32::LSL, TIMES_4));
         GetAssembler()->LoadSFromOffset(out, temp, data_offset);
       }
+      codegen_->MaybeRecordImplicitNullCheck(instruction);
       break;
     }
 
     case DataType::Type::kFloat64: {
+      // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+      // As two macro instructions can be emitted the max size is doubled.
+      EmissionCheckScope guard(GetVIXLAssembler(), 2 * kMaxMacroInstructionSizeInBytes);
      if (index.IsConstant()) {
         size_t offset = (Int32ConstantFrom(index) << TIMES_8) + data_offset;
         GetAssembler()->LoadDFromOffset(DRegisterFrom(out_loc), obj, offset);
@@ -6237,6 +6260,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) {
         __ Add(temp, obj, Operand(RegisterFrom(index), vixl32::LSL, TIMES_8));
         GetAssembler()->LoadDFromOffset(DRegisterFrom(out_loc), temp, data_offset);
       }
+      codegen_->MaybeRecordImplicitNullCheck(instruction);
       break;
     }
 
@@ -6246,15 +6270,6 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) {
       LOG(FATAL) << "Unreachable type " << type;
       UNREACHABLE();
   }
-
-  if (type == DataType::Type::kReference) {
-    // Potential implicit null checks, in the case of reference
-    // arrays, are handled in the previous switch statement.
-  } else if (!maybe_compressed_char_at) {
-    // TODO(VIXL): Use a scope to ensure we record the pc info immediately after
-    // the preceding load instruction.
-    codegen_->MaybeRecordImplicitNullCheck(instruction);
-  }
 }
 
 void LocationsBuilderARMVIXL::VisitArraySet(HArraySet* instruction) {
@@ -6308,7 +6323,10 @@ void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) {
        uint32_t full_offset =
             data_offset + (const_index << DataType::SizeShift(value_type));
         StoreOperandType store_type = GetStoreOperandType(value_type);
+        // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted.
+        EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
         GetAssembler()->StoreToOffset(store_type, RegisterFrom(value_loc), array, full_offset);
+        codegen_->MaybeRecordImplicitNullCheck(instruction);
       } else {
         UseScratchRegisterScope temps(GetVIXLAssembler());
         vixl32::Register temp = temps.Acquire();
@@ -6325,7 +6343,10 @@ void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) {
         } else {
           __ Add(temp, array, data_offset);
         }
+        // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted.
+        EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
         codegen_->StoreToShiftedRegOffset(value_type, value_loc, temp, RegisterFrom(index));
+        codegen_->MaybeRecordImplicitNullCheck(instruction);
       }
       break;
     }
@@ -6337,6 +6358,9 @@ void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) {
      DCHECK(!has_intermediate_address);
 
       if (instruction->InputAt(2)->IsNullConstant()) {
+        // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted.
+        // As two macro instructions can be emitted the max size is doubled.
+        EmissionCheckScope guard(GetVIXLAssembler(), 2 * kMaxMacroInstructionSizeInBytes);
         // Just setting null.
         if (index.IsConstant()) {
           size_t offset = (Int32ConstantFrom(index) << TIMES_4) + data_offset;
@@ -6348,8 +6372,6 @@ void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) {
          __ Add(temp, array, data_offset);
           codegen_->StoreToShiftedRegOffset(value_type, value_loc, temp, RegisterFrom(index));
         }
-        // TODO(VIXL): Use a scope to ensure we record the pc info immediately after the preceding
-        // store instruction.
         codegen_->MaybeRecordImplicitNullCheck(instruction);
         DCHECK(!needs_write_barrier);
         DCHECK(!needs_type_check);
@@ -6440,25 +6462,28 @@ void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) {
         source = temp1;
       }
 
-      if (index.IsConstant()) {
-        size_t offset = (Int32ConstantFrom(index) << TIMES_4) + data_offset;
-        GetAssembler()->StoreToOffset(kStoreWord, source, array, offset);
-      } else {
-        DCHECK(index.IsRegister()) << index;
+      {
+        // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted.
+        // As two macro instructions can be emitted the max size is doubled.
+        EmissionCheckScope guard(GetVIXLAssembler(), 2 * kMaxMacroInstructionSizeInBytes);
+        if (index.IsConstant()) {
+          size_t offset = (Int32ConstantFrom(index) << TIMES_4) + data_offset;
+          GetAssembler()->StoreToOffset(kStoreWord, source, array, offset);
+        } else {
+          DCHECK(index.IsRegister()) << index;
 
-        UseScratchRegisterScope temps(GetVIXLAssembler());
-        vixl32::Register temp = temps.Acquire();
-        __ Add(temp, array, data_offset);
-        codegen_->StoreToShiftedRegOffset(value_type,
-                                          LocationFrom(source),
-                                          temp,
-                                          RegisterFrom(index));
-      }
+          UseScratchRegisterScope temps(GetVIXLAssembler());
+          vixl32::Register temp = temps.Acquire();
+          __ Add(temp, array, data_offset);
+          codegen_->StoreToShiftedRegOffset(value_type,
+                                            LocationFrom(source),
+                                            temp,
+                                            RegisterFrom(index));
+        }
 
-      if (can_value_be_null || !needs_type_check) {
-        // TODO(VIXL): Ensure we record the pc position immediately after the preceding store
-        // instruction.
-        codegen_->MaybeRecordImplicitNullCheck(instruction);
+        if (can_value_be_null || !needs_type_check) {
+          codegen_->MaybeRecordImplicitNullCheck(instruction);
+        }
       }
 
       if (slow_path != nullptr) {
@@ -6469,6 +6494,9 @@ void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) {
     }
 
     case DataType::Type::kInt64: {
+      // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted.
+      // As two macro instructions can be emitted the max size is doubled.
+      EmissionCheckScope guard(GetVIXLAssembler(), 2 * kMaxMacroInstructionSizeInBytes);
      Location value = locations->InAt(2);
       if (index.IsConstant()) {
         size_t offset =
@@ -6480,10 +6508,14 @@ void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) {
         __ Add(temp, array, Operand(RegisterFrom(index), vixl32::LSL, TIMES_8));
         GetAssembler()->StoreToOffset(kStoreWordPair, LowRegisterFrom(value), temp, data_offset);
       }
+      codegen_->MaybeRecordImplicitNullCheck(instruction);
       break;
     }
 
     case DataType::Type::kFloat32: {
+      // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted.
+      // As two macro instructions can be emitted the max size is doubled.
+      EmissionCheckScope guard(GetVIXLAssembler(), 2 * kMaxMacroInstructionSizeInBytes);
       Location value = locations->InAt(2);
       DCHECK(value.IsFpuRegister());
       if (index.IsConstant()) {
@@ -6495,10 +6527,14 @@ void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) {
         __ Add(temp, array, Operand(RegisterFrom(index), vixl32::LSL, TIMES_4));
         GetAssembler()->StoreSToOffset(SRegisterFrom(value), temp, data_offset);
       }
+      codegen_->MaybeRecordImplicitNullCheck(instruction);
       break;
     }
 
     case DataType::Type::kFloat64: {
+      // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted.
+      // As two macro instructions can be emitted the max size is doubled.
+      EmissionCheckScope guard(GetVIXLAssembler(), 2 * kMaxMacroInstructionSizeInBytes);
       Location value = locations->InAt(2);
       DCHECK(value.IsFpuRegisterPair());
       if (index.IsConstant()) {
@@ -6510,6 +6546,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) {
         __ Add(temp, array, Operand(RegisterFrom(index), vixl32::LSL, TIMES_8));
         GetAssembler()->StoreDToOffset(DRegisterFrom(value), temp, data_offset);
       }
+      codegen_->MaybeRecordImplicitNullCheck(instruction);
       break;
     }
 
@@ -6519,13 +6556,6 @@ void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) {
       LOG(FATAL) << "Unreachable type " << value_type;
       UNREACHABLE();
   }
-
-  // Objects are handled in the switch.
-  if (value_type != DataType::Type::kReference) {
-    // TODO(VIXL): Ensure we record the pc position immediately after the preceding store
-    // instruction.
-    codegen_->MaybeRecordImplicitNullCheck(instruction);
-  }
 }
 
 void LocationsBuilderARMVIXL::VisitArrayLength(HArrayLength* instruction) {
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index b541351848..fae615d030 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -747,6 +747,15 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
                            vixl::aarch32::Register in,
                            vixl::aarch32::Register temp = vixl32::Register());
 
+  void MaybeRecordImplicitNullCheck(HInstruction* instr) final {
+    // The function must be only called within special scopes
+    // (EmissionCheckScope, ExactAssemblyScope) which prevent generation of
+    // veneer/literal pools by VIXL assembler.
+    CHECK_EQ(GetVIXLAssembler()->ArePoolsBlocked(), true)
+        << "The function must only be called within EmissionCheckScope or ExactAssemblyScope";
+    CodeGenerator::MaybeRecordImplicitNullCheck(instr);
+  }
+
  private:
   // Encoding of thunk type and data for link-time generated thunks for Baker read barriers.