author      2015-07-01 15:41:14 +0100
committer   2015-07-01 16:31:41 +0100
commit      4d02711ea578dbb789abb30cbaf12f9926e13d81 (patch)
tree        29c802afff6e73c06021c44e6b2ec9d8340c75e9 /compiler
parent      312f1bfcfd8f655e635c941dda147377d8bff814 (diff)
Implement heap poisoning in ART's Optimizing compiler.
- Instrument ARM, ARM64, x86 and x86-64 code generators.
- Note: To turn heap poisoning on in Optimizing, set the
  environment variable `ART_HEAP_POISONING` to "true"
  before compiling ART.
Bug: 12687968
Change-Id: Ib3120b38cf805a8a50207a314b9ccc90c8d93740
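For context: the `ART_HEAP_POISONING` switch is surfaced to the code in this
patch as the compile-time constant `kPoisonHeapReferences`. A minimal sketch of
that wiring, assuming the build forwards the environment variable as a
preprocessor define of the same name (the definition site itself is not part of
this change):

// Sketch, not part of this patch: assumed definition of the compile-time
// flag that is tested throughout the code generators below.
#ifdef ART_HEAP_POISONING
static constexpr bool kPoisonHeapReferences = true;
#else
static constexpr bool kPoisonHeapReferences = false;
#endif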
Diffstat (limited to 'compiler')
25 files changed, 525 insertions, 96 deletions
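The changes below all implement one scheme: a poisoned heap reference is the
32-bit two's-complement negation of the real reference, so poisoning and
unpoisoning are the same operation (compare the `rsb`-, `Neg`- and `Subu`-based
PoisonHeapReference/UnpoisonHeapReference helpers added to the ARM, ARM64 and
MIPS assemblers). A minimal C++ sketch of that invariant, assuming 32-bit
references:

#include <cstdint>

// Sketch only. Negation is an involution, so unpoison(poison(ref)) == ref,
// and null (0) is its own poisoned form, which is why the stores below skip
// poisoning when the value is a known-null reference.
uint32_t PoisonHeapReference(uint32_t ref) { return 0u - ref; }
uint32_t UnpoisonHeapReference(uint32_t ref) { return 0u - ref; }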
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index b828fcf7e1..d215662645 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -108,6 +108,13 @@ class CommonCompilerTest : public CommonRuntimeTest {
   std::list<std::vector<uint8_t>> header_code_and_maps_chunks_;
 };
 
+// TODO: When heap reference poisoning works with all compilers in use, get rid of this.
+#define TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING_WITH_QUICK() \
+  if (kPoisonHeapReferences && GetCompilerKind() == Compiler::kQuick) { \
+    printf("WARNING: TEST DISABLED FOR HEAP REFERENCE POISONING WITH QUICK\n"); \
+    return; \
+  }
+
 // TODO: When non-PIC works with all compilers in use, get rid of this.
 #define TEST_DISABLED_FOR_NON_PIC_COMPILING_WITH_OPTIMIZING() \
   if (GetCompilerKind() == Compiler::kOptimizing) { \
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index 28c485a41c..39496a4b30 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -653,6 +653,12 @@ CompiledMethod* QuickCompiler::Compile(const DexFile::CodeItem* code_item,
                                        uint32_t method_idx,
                                        jobject class_loader,
                                        const DexFile& dex_file) const {
+  if (kPoisonHeapReferences) {
+    VLOG(compiler) << "Skipping method : " << PrettyMethod(method_idx, dex_file)
+                   << " Reason = Quick does not support heap poisoning.";
+    return nullptr;
+  }
+
   // TODO: check method fingerprint here to determine appropriate backend type. Until then, use
   // build default.
   CompilerDriver* driver = GetCompilerDriver();
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index ba03f5a5d4..b358f4f396 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -146,7 +146,7 @@ TEST_F(CompilerDriverTest, DISABLED_LARGE_CompileDexLibCore) {
 }
 
 TEST_F(CompilerDriverTest, AbstractMethodErrorStub) {
-  TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
+  TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING_WITH_QUICK();
   jobject class_loader;
   {
     ScopedObjectAccess soa(Thread::Current());
@@ -192,6 +192,7 @@ class CompilerDriverMethodsTest : public CompilerDriverTest {
 };
 
 TEST_F(CompilerDriverMethodsTest, Selection) {
+  TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING_WITH_QUICK();
   Thread* self = Thread::Current();
   jobject class_loader;
   {
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 85fd6962fa..953dfcb2c3 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -138,7 +138,7 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
     FrameOffset handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
     // Check handle scope offset is within frame
     CHECK_LT(handle_scope_offset.Uint32Value(), frame_size);
-    // Note this LoadRef() doesn't need heap poisoning since its from the ArtMethod.
+    // Note this LoadRef() doesn't need heap unpoisoning since it's from the ArtMethod.
     // Note this LoadRef() does not include read barrier. It will be handled below.
__ LoadRef(main_jni_conv->InterproceduralScratchRegister(), mr_conv->MethodRegister(), ArtMethod::DeclaringClassOffset(), false); diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc index 39c316fa31..e3683ef0dd 100644 --- a/compiler/optimizing/code_generator_arm.cc +++ b/compiler/optimizing/code_generator_arm.cc @@ -332,8 +332,6 @@ class DeoptimizationSlowPathARM : public SlowPathCodeARM { }; #undef __ - -#undef __ #define __ down_cast<ArmAssembler*>(GetAssembler())-> inline Condition ARMCondition(IfCondition cond) { @@ -1383,6 +1381,7 @@ void InstructionCodeGeneratorARM::VisitInvokeVirtual(HInvokeVirtual* invoke) { DCHECK(receiver.IsRegister()); __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset); codegen_->MaybeRecordImplicitNullCheck(invoke); + __ MaybeUnpoisonHeapReference(temp); // temp = temp->GetMethodAt(method_offset); uint32_t entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset( kArmWordSize).Int32Value(); @@ -1422,6 +1421,7 @@ void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke) __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset); } codegen_->MaybeRecordImplicitNullCheck(invoke); + __ MaybeUnpoisonHeapReference(temp); // temp = temp->GetImtEntryAt(method_offset); uint32_t entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset( kArmWordSize).Int32Value(); @@ -2778,6 +2778,8 @@ void LocationsBuilderARM::VisitNewInstance(HNewInstance* instruction) { void InstructionCodeGeneratorARM::VisitNewInstance(HNewInstance* instruction) { InvokeRuntimeCallingConvention calling_convention; __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex()); + // Note: if heap poisoning is enabled, the entry point takes cares + // of poisoning the reference. codegen_->InvokeRuntime(GetThreadOffset<kArmWordSize>(instruction->GetEntrypoint()).Int32Value(), instruction, instruction->GetDexPc(), @@ -2797,6 +2799,8 @@ void LocationsBuilderARM::VisitNewArray(HNewArray* instruction) { void InstructionCodeGeneratorARM::VisitNewArray(HNewArray* instruction) { InvokeRuntimeCallingConvention calling_convention; __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex()); + // Note: if heap poisoning is enabled, the entry point takes cares + // of poisoning the reference. codegen_->InvokeRuntime(GetThreadOffset<kArmWordSize>(instruction->GetEntrypoint()).Int32Value(), instruction, instruction->GetDexPc(), @@ -3030,10 +3034,12 @@ void LocationsBuilderARM::HandleFieldSet(HInstruction* instruction, const FieldI bool generate_volatile = field_info.IsVolatile() && is_wide && !codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd(); + bool needs_write_barrier = + CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1)); // Temporary registers for the write barrier. // TODO: consider renaming StoreNeedsWriteBarrier to StoreNeedsGCMark. - if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) { - locations->AddTemp(Location::RequiresRegister()); + if (needs_write_barrier) { + locations->AddTemp(Location::RequiresRegister()); // Possibly used for reference poisoning too. 
locations->AddTemp(Location::RequiresRegister()); } else if (generate_volatile) { // Arm encoding have some additional constraints for ldrexd/strexd: @@ -3066,6 +3072,8 @@ void InstructionCodeGeneratorARM::HandleFieldSet(HInstruction* instruction, bool atomic_ldrd_strd = codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd(); Primitive::Type field_type = field_info.GetFieldType(); uint32_t offset = field_info.GetFieldOffset().Uint32Value(); + bool needs_write_barrier = + CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1)); if (is_volatile) { GenerateMemoryBarrier(MemBarrierKind::kAnyStore); @@ -3086,7 +3094,18 @@ void InstructionCodeGeneratorARM::HandleFieldSet(HInstruction* instruction, case Primitive::kPrimInt: case Primitive::kPrimNot: { - __ StoreToOffset(kStoreWord, value.AsRegister<Register>(), base, offset); + if (kPoisonHeapReferences && needs_write_barrier) { + // Note that in the case where `value` is a null reference, + // we do not enter this block, as a null reference does not + // need poisoning. + DCHECK_EQ(field_type, Primitive::kPrimNot); + Register temp = locations->GetTemp(0).AsRegister<Register>(); + __ Mov(temp, value.AsRegister<Register>()); + __ PoisonHeapReference(temp); + __ StoreToOffset(kStoreWord, temp, base, offset); + } else { + __ StoreToOffset(kStoreWord, value.AsRegister<Register>(), base, offset); + } break; } @@ -3265,6 +3284,10 @@ void InstructionCodeGeneratorARM::HandleFieldGet(HInstruction* instruction, if (is_volatile) { GenerateMemoryBarrier(MemBarrierKind::kLoadAny); } + + if (field_type == Primitive::kPrimNot) { + __ MaybeUnpoisonHeapReference(out.AsRegister<Register>()); + } } void LocationsBuilderARM::VisitInstanceFieldSet(HInstanceFieldSet* instruction) { @@ -3352,8 +3375,9 @@ void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) { LocationSummary* locations = instruction->GetLocations(); Register obj = locations->InAt(0).AsRegister<Register>(); Location index = locations->InAt(1); + Primitive::Type type = instruction->GetType(); - switch (instruction->GetType()) { + switch (type) { case Primitive::kPrimBoolean: { uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value(); Register out = locations->Out().AsRegister<Register>(); @@ -3470,10 +3494,15 @@ void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) { } case Primitive::kPrimVoid: - LOG(FATAL) << "Unreachable type " << instruction->GetType(); + LOG(FATAL) << "Unreachable type " << type; UNREACHABLE(); } codegen_->MaybeRecordImplicitNullCheck(instruction); + + if (type == Primitive::kPrimNot) { + Register out = locations->Out().AsRegister<Register>(); + __ MaybeUnpoisonHeapReference(out); + } } void LocationsBuilderARM::VisitArraySet(HArraySet* instruction) { @@ -3501,7 +3530,7 @@ void LocationsBuilderARM::VisitArraySet(HArraySet* instruction) { if (needs_write_barrier) { // Temporary registers for the write barrier. - locations->AddTemp(Location::RequiresRegister()); + locations->AddTemp(Location::RequiresRegister()); // Possibly used for ref. poisoning too. 
locations->AddTemp(Location::RequiresRegister()); } } @@ -3552,14 +3581,25 @@ void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) { if (!needs_runtime_call) { uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value(); Register value = locations->InAt(2).AsRegister<Register>(); + Register source = value; + if (kPoisonHeapReferences && needs_write_barrier) { + // Note that in the case where `value` is a null reference, + // we do not enter this block, as a null reference does not + // need poisoning. + DCHECK_EQ(value_type, Primitive::kPrimNot); + Register temp = locations->GetTemp(0).AsRegister<Register>(); + __ Mov(temp, value); + __ PoisonHeapReference(temp); + source = temp; + } if (index.IsConstant()) { size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset; - __ StoreToOffset(kStoreWord, value, obj, offset); + __ StoreToOffset(kStoreWord, source, obj, offset); } else { DCHECK(index.IsRegister()) << index; __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4)); - __ StoreToOffset(kStoreWord, value, IP, data_offset); + __ StoreToOffset(kStoreWord, source, IP, data_offset); } codegen_->MaybeRecordImplicitNullCheck(instruction); if (needs_write_barrier) { @@ -3570,6 +3610,8 @@ void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) { } } else { DCHECK_EQ(value_type, Primitive::kPrimNot); + // Note: if heap poisoning is enabled, pAputObject takes cares + // of poisoning the reference. codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject), instruction, instruction->GetDexPc(), @@ -3994,6 +4036,7 @@ void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) { current_method, ArtMethod::DexCacheResolvedTypesOffset().Int32Value()); __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())); + __ MaybeUnpoisonHeapReference(out); SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM( cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck()); @@ -4053,7 +4096,9 @@ void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) { __ LoadFromOffset( kLoadWord, out, current_method, ArtMethod::DeclaringClassOffset().Int32Value()); __ LoadFromOffset(kLoadWord, out, out, mirror::Class::DexCacheStringsOffset().Int32Value()); + __ MaybeUnpoisonHeapReference(out); __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(load->GetStringIndex())); + __ MaybeUnpoisonHeapReference(out); __ CompareAndBranchIfZero(out, slow_path->GetEntryLabel()); __ Bind(slow_path->GetExitLabel()); } @@ -4111,6 +4156,7 @@ void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) { } // Compare the class of `obj` with `cls`. __ LoadFromOffset(kLoadWord, out, obj, class_offset); + __ MaybeUnpoisonHeapReference(out); __ cmp(out, ShifterOperand(cls)); if (instruction->IsClassFinal()) { // Classes must be equal for the instanceof to succeed. @@ -4164,7 +4210,10 @@ void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) { } // Compare the class of `obj` with `cls`. __ LoadFromOffset(kLoadWord, temp, obj, class_offset); + __ MaybeUnpoisonHeapReference(temp); __ cmp(temp, ShifterOperand(cls)); + // The checkcast succeeds if the classes are equal (fast path). + // Otherwise, we need to go into the slow path to check the types. 
__ b(slow_path->GetEntryLabel(), NE); __ Bind(slow_path->GetExitLabel()); } @@ -4316,5 +4365,8 @@ void InstructionCodeGeneratorARM::VisitBoundType(HBoundType* instruction) { LOG(FATAL) << "Unreachable"; } +#undef __ +#undef QUICK_ENTRY_POINT + } // namespace arm } // namespace art diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc index 9b7124d33d..a9a95d3649 100644 --- a/compiler/optimizing/code_generator_arm64.cc +++ b/compiler/optimizing/code_generator_arm64.cc @@ -1250,6 +1250,7 @@ void LocationsBuilderARM64::HandleFieldGet(HInstruction* instruction) { void InstructionCodeGeneratorARM64::HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info) { DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet()); + Primitive::Type field_type = field_info.GetFieldType(); BlockPoolsScope block_pools(GetVIXLAssembler()); MemOperand field = HeapOperand(InputRegisterAt(instruction, 0), field_info.GetFieldOffset()); @@ -1260,15 +1261,19 @@ void InstructionCodeGeneratorARM64::HandleFieldGet(HInstruction* instruction, // NB: LoadAcquire will record the pc info if needed. codegen_->LoadAcquire(instruction, OutputCPURegister(instruction), field); } else { - codegen_->Load(field_info.GetFieldType(), OutputCPURegister(instruction), field); + codegen_->Load(field_type, OutputCPURegister(instruction), field); codegen_->MaybeRecordImplicitNullCheck(instruction); // For IRIW sequential consistency kLoadAny is not sufficient. GenerateMemoryBarrier(MemBarrierKind::kAnyAny); } } else { - codegen_->Load(field_info.GetFieldType(), OutputCPURegister(instruction), field); + codegen_->Load(field_type, OutputCPURegister(instruction), field); codegen_->MaybeRecordImplicitNullCheck(instruction); } + + if (field_type == Primitive::kPrimNot) { + GetAssembler()->MaybeUnpoisonHeapReference(OutputCPURegister(instruction).W()); + } } void LocationsBuilderARM64::HandleFieldSet(HInstruction* instruction) { @@ -1290,23 +1295,38 @@ void InstructionCodeGeneratorARM64::HandleFieldSet(HInstruction* instruction, Register obj = InputRegisterAt(instruction, 0); CPURegister value = InputCPURegisterAt(instruction, 1); + CPURegister source = value; Offset offset = field_info.GetFieldOffset(); Primitive::Type field_type = field_info.GetFieldType(); bool use_acquire_release = codegen_->GetInstructionSetFeatures().PreferAcquireRelease(); - if (field_info.IsVolatile()) { - if (use_acquire_release) { - codegen_->StoreRelease(field_type, value, HeapOperand(obj, offset)); - codegen_->MaybeRecordImplicitNullCheck(instruction); + { + // We use a block to end the scratch scope before the write barrier, thus + // freeing the temporary registers so they can be used in `MarkGCCard`. 
+ UseScratchRegisterScope temps(GetVIXLAssembler()); + + if (kPoisonHeapReferences && field_type == Primitive::kPrimNot) { + DCHECK(value.IsW()); + Register temp = temps.AcquireW(); + __ Mov(temp, value.W()); + GetAssembler()->PoisonHeapReference(temp.W()); + source = temp; + } + + if (field_info.IsVolatile()) { + if (use_acquire_release) { + codegen_->StoreRelease(field_type, source, HeapOperand(obj, offset)); + codegen_->MaybeRecordImplicitNullCheck(instruction); + } else { + GenerateMemoryBarrier(MemBarrierKind::kAnyStore); + codegen_->Store(field_type, source, HeapOperand(obj, offset)); + codegen_->MaybeRecordImplicitNullCheck(instruction); + GenerateMemoryBarrier(MemBarrierKind::kAnyAny); + } } else { - GenerateMemoryBarrier(MemBarrierKind::kAnyStore); - codegen_->Store(field_type, value, HeapOperand(obj, offset)); + codegen_->Store(field_type, source, HeapOperand(obj, offset)); codegen_->MaybeRecordImplicitNullCheck(instruction); - GenerateMemoryBarrier(MemBarrierKind::kAnyAny); } - } else { - codegen_->Store(field_type, value, HeapOperand(obj, offset)); - codegen_->MaybeRecordImplicitNullCheck(instruction); } if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) { @@ -1464,6 +1484,10 @@ void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) { codegen_->Load(type, OutputCPURegister(instruction), source); codegen_->MaybeRecordImplicitNullCheck(instruction); + + if (type == Primitive::kPrimNot) { + GetAssembler()->MaybeUnpoisonHeapReference(OutputCPURegister(instruction).W()); + } } void LocationsBuilderARM64::VisitArrayLength(HArrayLength* instruction) { @@ -1506,12 +1530,15 @@ void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) { bool needs_runtime_call = locations->WillCall(); if (needs_runtime_call) { + // Note: if heap poisoning is enabled, pAputObject takes cares + // of poisoning the reference. codegen_->InvokeRuntime( QUICK_ENTRY_POINT(pAputObject), instruction, instruction->GetDexPc(), nullptr); CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>(); } else { Register obj = InputRegisterAt(instruction, 0); CPURegister value = InputCPURegisterAt(instruction, 2); + CPURegister source = value; Location index = locations->InAt(1); size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(value_type)).Uint32Value(); MemOperand destination = HeapOperand(obj); @@ -1522,6 +1549,14 @@ void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) { // freeing the temporary registers so they can be used in `MarkGCCard`. UseScratchRegisterScope temps(masm); + if (kPoisonHeapReferences && value_type == Primitive::kPrimNot) { + DCHECK(value.IsW()); + Register temp = temps.AcquireW(); + __ Mov(temp, value.W()); + GetAssembler()->PoisonHeapReference(temp.W()); + source = temp; + } + if (index.IsConstant()) { offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(value_type); destination = HeapOperand(obj, offset); @@ -1532,7 +1567,7 @@ void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) { destination = HeapOperand(temp, offset); } - codegen_->Store(value_type, value, destination); + codegen_->Store(value_type, source, destination); codegen_->MaybeRecordImplicitNullCheck(instruction); } if (CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue())) { @@ -1585,7 +1620,10 @@ void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) { } // Compare the class of `obj` with `cls`. 
__ Ldr(obj_cls, HeapOperand(obj, mirror::Object::ClassOffset())); + GetAssembler()->MaybeUnpoisonHeapReference(obj_cls.W()); __ Cmp(obj_cls, cls); + // The checkcast succeeds if the classes are equal (fast path). + // Otherwise, we need to go into the slow path to check the types. __ B(ne, slow_path->GetEntryLabel()); __ Bind(slow_path->GetExitLabel()); } @@ -2152,6 +2190,7 @@ void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) { // Compare the class of `obj` with `cls`. __ Ldr(out, HeapOperand(obj, mirror::Object::ClassOffset())); + GetAssembler()->MaybeUnpoisonHeapReference(out.W()); __ Cmp(out, cls); if (instruction->IsClassFinal()) { // Classes must be equal for the instanceof to succeed. @@ -2225,6 +2264,7 @@ void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invok __ Ldr(temp.W(), HeapOperandFrom(receiver, class_offset)); } codegen_->MaybeRecordImplicitNullCheck(invoke); + GetAssembler()->MaybeUnpoisonHeapReference(temp.W()); // temp = temp->GetImtEntryAt(method_offset); __ Ldr(temp, MemOperand(temp, method_offset)); // lr = temp->GetEntryPoint(); @@ -2350,6 +2390,7 @@ void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) { DCHECK(receiver.IsRegister()); __ Ldr(temp.W(), HeapOperandFrom(receiver, class_offset)); codegen_->MaybeRecordImplicitNullCheck(invoke); + GetAssembler()->MaybeUnpoisonHeapReference(temp.W()); // temp = temp->GetMethodAt(method_offset); __ Ldr(temp, MemOperand(temp, method_offset)); // lr = temp->GetEntryPoint(); @@ -2379,6 +2420,7 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) { DCHECK(cls->CanCallRuntime()); __ Ldr(out, MemOperand(current_method, ArtMethod::DexCacheResolvedTypesOffset().Int32Value())); __ Ldr(out, HeapOperand(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()))); + GetAssembler()->MaybeUnpoisonHeapReference(out.W()); SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64( cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck()); @@ -2428,7 +2470,9 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) { Register current_method = InputRegisterAt(load, 0); __ Ldr(out, MemOperand(current_method, ArtMethod::DeclaringClassOffset().Int32Value())); __ Ldr(out, HeapOperand(out, mirror::Class::DexCacheStringsOffset())); + GetAssembler()->MaybeUnpoisonHeapReference(out.W()); __ Ldr(out, HeapOperand(out, CodeGenerator::GetCacheOffset(load->GetStringIndex()))); + GetAssembler()->MaybeUnpoisonHeapReference(out.W()); __ Cbz(out, slow_path->GetEntryLabel()); __ Bind(slow_path->GetExitLabel()); } @@ -2563,6 +2607,8 @@ void InstructionCodeGeneratorARM64::VisitNewArray(HNewArray* instruction) { Register type_index = RegisterFrom(locations->GetTemp(0), Primitive::kPrimInt); DCHECK(type_index.Is(w0)); __ Mov(type_index, instruction->GetTypeIndex()); + // Note: if heap poisoning is enabled, the entry point takes cares + // of poisoning the reference. codegen_->InvokeRuntime( GetThreadOffset<kArm64WordSize>(instruction->GetEntrypoint()).Int32Value(), instruction, @@ -2586,6 +2632,8 @@ void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction) Register type_index = RegisterFrom(locations->GetTemp(0), Primitive::kPrimInt); DCHECK(type_index.Is(w0)); __ Mov(type_index, instruction->GetTypeIndex()); + // Note: if heap poisoning is enabled, the entry point takes cares + // of poisoning the reference. 
codegen_->InvokeRuntime( GetThreadOffset<kArm64WordSize>(instruction->GetEntrypoint()).Int32Value(), instruction, diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc index 931d751db0..262b234d2d 100644 --- a/compiler/optimizing/code_generator_x86.cc +++ b/compiler/optimizing/code_generator_x86.cc @@ -1315,9 +1315,11 @@ void InstructionCodeGeneratorX86::VisitInvokeVirtual(HInvokeVirtual* invoke) { LocationSummary* locations = invoke->GetLocations(); Location receiver = locations->InAt(0); uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); + // temp = object->GetClass(); DCHECK(receiver.IsRegister()); __ movl(temp, Address(receiver.AsRegister<Register>(), class_offset)); codegen_->MaybeRecordImplicitNullCheck(invoke); + __ MaybeUnpoisonHeapReference(temp); // temp = temp->GetMethodAt(method_offset); __ movl(temp, Address(temp, method_offset)); // call temp->GetEntryPoint(); @@ -1354,7 +1356,8 @@ void InstructionCodeGeneratorX86::VisitInvokeInterface(HInvokeInterface* invoke) } else { __ movl(temp, Address(receiver.AsRegister<Register>(), class_offset)); } - codegen_->MaybeRecordImplicitNullCheck(invoke); + codegen_->MaybeRecordImplicitNullCheck(invoke); + __ MaybeUnpoisonHeapReference(temp); // temp = temp->GetImtEntryAt(method_offset); __ movl(temp, Address(temp, method_offset)); // call temp->GetEntryPoint(); @@ -3001,6 +3004,8 @@ void LocationsBuilderX86::VisitNewInstance(HNewInstance* instruction) { void InstructionCodeGeneratorX86::VisitNewInstance(HNewInstance* instruction) { InvokeRuntimeCallingConvention calling_convention; __ movl(calling_convention.GetRegisterAt(0), Immediate(instruction->GetTypeIndex())); + // Note: if heap poisoning is enabled, the entry point takes cares + // of poisoning the reference. __ fs()->call(Address::Absolute(GetThreadOffset<kX86WordSize>(instruction->GetEntrypoint()))); codegen_->RecordPcInfo(instruction, instruction->GetDexPc()); @@ -3021,6 +3026,8 @@ void InstructionCodeGeneratorX86::VisitNewArray(HNewArray* instruction) { InvokeRuntimeCallingConvention calling_convention; __ movl(calling_convention.GetRegisterAt(0), Immediate(instruction->GetTypeIndex())); + // Note: if heap poisoning is enabled, the entry point takes cares + // of poisoning the reference. __ fs()->call(Address::Absolute(GetThreadOffset<kX86WordSize>(instruction->GetEntrypoint()))); codegen_->RecordPcInfo(instruction, instruction->GetDexPc()); @@ -3397,6 +3404,10 @@ void InstructionCodeGeneratorX86::HandleFieldGet(HInstruction* instruction, if (is_volatile) { GenerateMemoryBarrier(MemBarrierKind::kLoadAny); } + + if (field_type == Primitive::kPrimNot) { + __ MaybeUnpoisonHeapReference(out.AsRegister<Register>()); + } } void LocationsBuilderX86::HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info) { @@ -3420,9 +3431,9 @@ void LocationsBuilderX86::HandleFieldSet(HInstruction* instruction, const FieldI } else { locations->SetInAt(1, Location::RequiresRegister()); } - // Temporary registers for the write barrier. if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) { - locations->AddTemp(Location::RequiresRegister()); + // Temporary registers for the write barrier. + locations->AddTemp(Location::RequiresRegister()); // Possibly used for reference poisoning too. // Ensure the card is in a byte register. 
locations->AddTemp(Location::RegisterLocation(ECX)); } else if (is_volatile && (field_type == Primitive::kPrimLong)) { @@ -3447,6 +3458,8 @@ void InstructionCodeGeneratorX86::HandleFieldSet(HInstruction* instruction, bool is_volatile = field_info.IsVolatile(); Primitive::Type field_type = field_info.GetFieldType(); uint32_t offset = field_info.GetFieldOffset().Uint32Value(); + bool needs_write_barrier = + CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1)); if (is_volatile) { GenerateMemoryBarrier(MemBarrierKind::kAnyStore); @@ -3467,7 +3480,18 @@ void InstructionCodeGeneratorX86::HandleFieldSet(HInstruction* instruction, case Primitive::kPrimInt: case Primitive::kPrimNot: { - __ movl(Address(base, offset), value.AsRegister<Register>()); + if (kPoisonHeapReferences && needs_write_barrier) { + // Note that in the case where `value` is a null reference, + // we do not enter this block, as the reference does not + // need poisoning. + DCHECK_EQ(field_type, Primitive::kPrimNot); + Register temp = locations->GetTemp(0).AsRegister<Register>(); + __ movl(temp, value.AsRegister<Register>()); + __ PoisonHeapReference(temp); + __ movl(Address(base, offset), temp); + } else { + __ movl(Address(base, offset), value.AsRegister<Register>()); + } break; } @@ -3508,7 +3532,7 @@ void InstructionCodeGeneratorX86::HandleFieldSet(HInstruction* instruction, codegen_->MaybeRecordImplicitNullCheck(instruction); } - if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) { + if (needs_write_barrier) { Register temp = locations->GetTemp(0).AsRegister<Register>(); Register card = locations->GetTemp(1).AsRegister<Register>(); codegen_->MarkGCCard(temp, card, base, value.AsRegister<Register>(), value_can_be_null); @@ -3737,6 +3761,11 @@ void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) { if (type != Primitive::kPrimLong) { codegen_->MaybeRecordImplicitNullCheck(instruction); } + + if (type == Primitive::kPrimNot) { + Register out = locations->Out().AsRegister<Register>(); + __ MaybeUnpoisonHeapReference(out); + } } void LocationsBuilderX86::VisitArraySet(HArraySet* instruction) { @@ -3776,9 +3805,9 @@ void LocationsBuilderX86::VisitArraySet(HArraySet* instruction) { } else { locations->SetInAt(2, Location::RegisterOrConstant(instruction->InputAt(2))); } - // Temporary registers for the write barrier. if (needs_write_barrier) { - locations->AddTemp(Location::RequiresRegister()); + // Temporary registers for the write barrier. + locations->AddTemp(Location::RequiresRegister()); // Possibly used for ref. poisoning too. // Ensure the card is in a byte register. 
locations->AddTemp(Location::RegisterLocation(ECX)); } @@ -3852,21 +3881,43 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) { size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset; if (value.IsRegister()) { - __ movl(Address(obj, offset), value.AsRegister<Register>()); + if (kPoisonHeapReferences && value_type == Primitive::kPrimNot) { + Register temp = locations->GetTemp(0).AsRegister<Register>(); + __ movl(temp, value.AsRegister<Register>()); + __ PoisonHeapReference(temp); + __ movl(Address(obj, offset), temp); + } else { + __ movl(Address(obj, offset), value.AsRegister<Register>()); + } } else { DCHECK(value.IsConstant()) << value; - __ movl(Address(obj, offset), - Immediate(CodeGenerator::GetInt32ValueOf(value.GetConstant()))); + int32_t v = CodeGenerator::GetInt32ValueOf(value.GetConstant()); + // `value_type == Primitive::kPrimNot` implies `v == 0`. + DCHECK((value_type != Primitive::kPrimNot) || (v == 0)); + // Note: if heap poisoning is enabled, no need to poison + // (negate) `v` if it is a reference, as it would be null. + __ movl(Address(obj, offset), Immediate(v)); } } else { DCHECK(index.IsRegister()) << index; if (value.IsRegister()) { - __ movl(Address(obj, index.AsRegister<Register>(), TIMES_4, data_offset), - value.AsRegister<Register>()); + if (kPoisonHeapReferences && value_type == Primitive::kPrimNot) { + Register temp = locations->GetTemp(0).AsRegister<Register>(); + __ movl(temp, value.AsRegister<Register>()); + __ PoisonHeapReference(temp); + __ movl(Address(obj, index.AsRegister<Register>(), TIMES_4, data_offset), temp); + } else { + __ movl(Address(obj, index.AsRegister<Register>(), TIMES_4, data_offset), + value.AsRegister<Register>()); + } } else { DCHECK(value.IsConstant()) << value; - __ movl(Address(obj, index.AsRegister<Register>(), TIMES_4, data_offset), - Immediate(CodeGenerator::GetInt32ValueOf(value.GetConstant()))); + int32_t v = CodeGenerator::GetInt32ValueOf(value.GetConstant()); + // `value_type == Primitive::kPrimNot` implies `v == 0`. + DCHECK((value_type != Primitive::kPrimNot) || (v == 0)); + // Note: if heap poisoning is enabled, no need to poison + // (negate) `v` if it is a reference, as it would be null. + __ movl(Address(obj, index.AsRegister<Register>(), TIMES_4, data_offset), Immediate(v)); } } codegen_->MaybeRecordImplicitNullCheck(instruction); @@ -3880,6 +3931,8 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) { } else { DCHECK_EQ(value_type, Primitive::kPrimNot); DCHECK(!codegen_->IsLeafMethod()); + // Note: if heap poisoning is enabled, pAputObject takes cares + // of poisoning the reference. 
__ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAputObject))); codegen_->RecordPcInfo(instruction, instruction->GetDexPc()); } @@ -4343,6 +4396,7 @@ void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) { __ movl(out, Address( current_method, ArtMethod::DexCacheResolvedTypesOffset().Int32Value())); __ movl(out, Address(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()))); + __ MaybeUnpoisonHeapReference(out); SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86( cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck()); @@ -4400,7 +4454,9 @@ void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) { Register current_method = locations->InAt(0).AsRegister<Register>(); __ movl(out, Address(current_method, ArtMethod::DeclaringClassOffset().Int32Value())); __ movl(out, Address(out, mirror::Class::DexCacheStringsOffset().Int32Value())); + __ MaybeUnpoisonHeapReference(out); __ movl(out, Address(out, CodeGenerator::GetCacheOffset(load->GetStringIndex()))); + __ MaybeUnpoisonHeapReference(out); __ testl(out, out); __ j(kEqual, slow_path->GetEntryLabel()); __ Bind(slow_path->GetExitLabel()); @@ -4455,8 +4511,9 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) { __ testl(obj, obj); __ j(kEqual, &zero); } - __ movl(out, Address(obj, class_offset)); // Compare the class of `obj` with `cls`. + __ movl(out, Address(obj, class_offset)); + __ MaybeUnpoisonHeapReference(out); if (cls.IsRegister()) { __ cmpl(out, cls.AsRegister<Register>()); } else { @@ -4514,16 +4571,17 @@ void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) { __ testl(obj, obj); __ j(kEqual, slow_path->GetExitLabel()); } - - __ movl(temp, Address(obj, class_offset)); // Compare the class of `obj` with `cls`. + __ movl(temp, Address(obj, class_offset)); + __ MaybeUnpoisonHeapReference(temp); if (cls.IsRegister()) { __ cmpl(temp, cls.AsRegister<Register>()); } else { DCHECK(cls.IsStackSlot()) << cls; __ cmpl(temp, Address(ESP, cls.GetStackIndex())); } - + // The checkcast succeeds if the classes are equal (fast path). + // Otherwise, we need to go into the slow path to check the types. __ j(kNotEqual, slow_path->GetEntryLabel()); __ Bind(slow_path->GetExitLabel()); } @@ -4687,5 +4745,7 @@ void InstructionCodeGeneratorX86::VisitBoundType(HBoundType* instruction) { LOG(FATAL) << "Unreachable"; } +#undef __ + } // namespace x86 } // namespace art diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc index afffbe204f..c9d19c8f66 100644 --- a/compiler/optimizing/code_generator_x86_64.cc +++ b/compiler/optimizing/code_generator_x86_64.cc @@ -213,7 +213,7 @@ class LoadClassSlowPathX86_64 : public SlowPathCodeX86_64 { __ movl(CpuRegister(calling_convention.GetRegisterAt(0)), Immediate(cls_->GetTypeIndex())); __ gs()->call(Address::Absolute((do_clinit_ ? 
QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInitializeStaticStorage) - : QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInitializeType)) , true)); + : QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInitializeType)), true)); RecordPcInfo(codegen, at_, dex_pc_); Location out = locations->Out(); @@ -1429,6 +1429,7 @@ void InstructionCodeGeneratorX86_64::VisitInvokeVirtual(HInvokeVirtual* invoke) DCHECK(receiver.IsRegister()); __ movl(temp, Address(receiver.AsRegister<CpuRegister>(), class_offset)); codegen_->MaybeRecordImplicitNullCheck(invoke); + __ MaybeUnpoisonHeapReference(temp); // temp = temp->GetMethodAt(method_offset); __ movq(temp, Address(temp, method_offset)); // call temp->GetEntryPoint(); @@ -1466,6 +1467,7 @@ void InstructionCodeGeneratorX86_64::VisitInvokeInterface(HInvokeInterface* invo __ movl(temp, Address(receiver.AsRegister<CpuRegister>(), class_offset)); } codegen_->MaybeRecordImplicitNullCheck(invoke); + __ MaybeUnpoisonHeapReference(temp); // temp = temp->GetImtEntryAt(method_offset); __ movq(temp, Address(temp, method_offset)); // call temp->GetEntryPoint(); @@ -3060,6 +3062,8 @@ void InstructionCodeGeneratorX86_64::VisitNewInstance(HNewInstance* instruction) InvokeRuntimeCallingConvention calling_convention; codegen_->Load64BitValue(CpuRegister(calling_convention.GetRegisterAt(0)), instruction->GetTypeIndex()); + // Note: if heap poisoning is enabled, the entry point takes cares + // of poisoning the reference. __ gs()->call( Address::Absolute(GetThreadOffset<kX86_64WordSize>(instruction->GetEntrypoint()), true)); @@ -3082,6 +3086,8 @@ void InstructionCodeGeneratorX86_64::VisitNewArray(HNewArray* instruction) { codegen_->Load64BitValue(CpuRegister(calling_convention.GetRegisterAt(0)), instruction->GetTypeIndex()); + // Note: if heap poisoning is enabled, the entry point takes cares + // of poisoning the reference. __ gs()->call( Address::Absolute(GetThreadOffset<kX86_64WordSize>(instruction->GetEntrypoint()), true)); @@ -3270,6 +3276,10 @@ void InstructionCodeGeneratorX86_64::HandleFieldGet(HInstruction* instruction, if (is_volatile) { GenerateMemoryBarrier(MemBarrierKind::kLoadAny); } + + if (field_type == Primitive::kPrimNot) { + __ MaybeUnpoisonHeapReference(out.AsRegister<CpuRegister>()); + } } void LocationsBuilderX86_64::HandleFieldSet(HInstruction* instruction, @@ -3278,8 +3288,9 @@ void LocationsBuilderX86_64::HandleFieldSet(HInstruction* instruction, LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + Primitive::Type field_type = field_info.GetFieldType(); bool needs_write_barrier = - CodeGenerator::StoreNeedsWriteBarrier(field_info.GetFieldType(), instruction->InputAt(1)); + CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1)); locations->SetInAt(0, Location::RequiresRegister()); if (Primitive::IsFloatingPointType(instruction->InputAt(1)->GetType())) { @@ -3289,7 +3300,10 @@ void LocationsBuilderX86_64::HandleFieldSet(HInstruction* instruction, } if (needs_write_barrier) { // Temporary registers for the write barrier. + locations->AddTemp(Location::RequiresRegister()); // Possibly used for reference poisoning too. locations->AddTemp(Location::RequiresRegister()); + } else if (kPoisonHeapReferences && field_type == Primitive::kPrimNot) { + // Temporary register for the reference poisoning. 
locations->AddTemp(Location::RequiresRegister()); } } @@ -3337,9 +3351,20 @@ void InstructionCodeGeneratorX86_64::HandleFieldSet(HInstruction* instruction, case Primitive::kPrimNot: { if (value.IsConstant()) { int32_t v = CodeGenerator::GetInt32ValueOf(value.GetConstant()); + // `field_type == Primitive::kPrimNot` implies `v == 0`. + DCHECK((field_type != Primitive::kPrimNot) || (v == 0)); + // Note: if heap poisoning is enabled, no need to poison + // (negate) `v` if it is a reference, as it would be null. __ movl(Address(base, offset), Immediate(v)); } else { - __ movl(Address(base, offset), value.AsRegister<CpuRegister>()); + if (kPoisonHeapReferences && field_type == Primitive::kPrimNot) { + CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>(); + __ movl(temp, value.AsRegister<CpuRegister>()); + __ PoisonHeapReference(temp); + __ movl(Address(base, offset), temp); + } else { + __ movl(Address(base, offset), value.AsRegister<CpuRegister>()); + } } break; } @@ -3483,8 +3508,9 @@ void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) { LocationSummary* locations = instruction->GetLocations(); CpuRegister obj = locations->InAt(0).AsRegister<CpuRegister>(); Location index = locations->InAt(1); + Primitive::Type type = instruction->GetType(); - switch (instruction->GetType()) { + switch (type) { case Primitive::kPrimBoolean: { uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value(); CpuRegister out = locations->Out().AsRegister<CpuRegister>(); @@ -3585,10 +3611,15 @@ void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) { } case Primitive::kPrimVoid: - LOG(FATAL) << "Unreachable type " << instruction->GetType(); + LOG(FATAL) << "Unreachable type " << type; UNREACHABLE(); } codegen_->MaybeRecordImplicitNullCheck(instruction); + + if (type == Primitive::kPrimNot) { + CpuRegister out = locations->Out().AsRegister<CpuRegister>(); + __ MaybeUnpoisonHeapReference(out); + } } void LocationsBuilderX86_64::VisitArraySet(HArraySet* instruction) { @@ -3620,7 +3651,7 @@ void LocationsBuilderX86_64::VisitArraySet(HArraySet* instruction) { if (needs_write_barrier) { // Temporary registers for the write barrier. - locations->AddTemp(Location::RequiresRegister()); + locations->AddTemp(Location::RequiresRegister()); // Possibly used for ref. poisoning too. locations->AddTemp(Location::RequiresRegister()); } } @@ -3696,20 +3727,42 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) { size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset; if (value.IsRegister()) { - __ movl(Address(obj, offset), value.AsRegister<CpuRegister>()); + if (kPoisonHeapReferences && value_type == Primitive::kPrimNot) { + CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>(); + __ movl(temp, value.AsRegister<CpuRegister>()); + __ PoisonHeapReference(temp); + __ movl(Address(obj, offset), temp); + } else { + __ movl(Address(obj, offset), value.AsRegister<CpuRegister>()); + } } else { DCHECK(value.IsConstant()) << value; int32_t v = CodeGenerator::GetInt32ValueOf(value.GetConstant()); + // `value_type == Primitive::kPrimNot` implies `v == 0`. + DCHECK((value_type != Primitive::kPrimNot) || (v == 0)); + // Note: if heap poisoning is enabled, no need to poison + // (negate) `v` if it is a reference, as it would be null. 
__ movl(Address(obj, offset), Immediate(v)); } } else { DCHECK(index.IsRegister()) << index; if (value.IsRegister()) { - __ movl(Address(obj, index.AsRegister<CpuRegister>(), TIMES_4, data_offset), - value.AsRegister<CpuRegister>()); + if (kPoisonHeapReferences && value_type == Primitive::kPrimNot) { + CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>(); + __ movl(temp, value.AsRegister<CpuRegister>()); + __ PoisonHeapReference(temp); + __ movl(Address(obj, index.AsRegister<CpuRegister>(), TIMES_4, data_offset), temp); + } else { + __ movl(Address(obj, index.AsRegister<CpuRegister>(), TIMES_4, data_offset), + value.AsRegister<CpuRegister>()); + } } else { DCHECK(value.IsConstant()) << value; int32_t v = CodeGenerator::GetInt32ValueOf(value.GetConstant()); + // `value_type == Primitive::kPrimNot` implies `v == 0`. + DCHECK((value_type != Primitive::kPrimNot) || (v == 0)); + // Note: if heap poisoning is enabled, no need to poison + // (negate) `v` if it is a reference, as it would be null. __ movl(Address(obj, index.AsRegister<CpuRegister>(), TIMES_4, data_offset), Immediate(v)); } @@ -3724,6 +3777,8 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) { } } else { DCHECK_EQ(value_type, Primitive::kPrimNot); + // Note: if heap poisoning is enabled, pAputObject takes cares + // of poisoning the reference. __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pAputObject), true)); DCHECK(!codegen_->IsLeafMethod()); @@ -3876,7 +3931,7 @@ void CodeGeneratorX86_64::MarkGCCard(CpuRegister temp, Thread::CardTableOffset<kX86_64WordSize>().Int32Value(), true)); __ movq(temp, object); __ shrq(temp, Immediate(gc::accounting::CardTable::kCardShift)); - __ movb(Address(temp, card, TIMES_1, 0), card); + __ movb(Address(temp, card, TIMES_1, 0), card); if (value_can_be_null) { __ Bind(&is_null); } @@ -4187,6 +4242,8 @@ void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) { __ movl(out, Address( current_method, ArtMethod::DexCacheResolvedTypesOffset().Int32Value())); __ movl(out, Address(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()))); + __ MaybeUnpoisonHeapReference(out); + SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86_64( cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck()); codegen_->AddSlowPath(slow_path); @@ -4234,7 +4291,9 @@ void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) { CpuRegister current_method = locations->InAt(0).AsRegister<CpuRegister>(); __ movl(out, Address(current_method, ArtMethod::DeclaringClassOffset().Int32Value())); __ movl(out, Address(out, mirror::Class::DexCacheStringsOffset().Int32Value())); + __ MaybeUnpoisonHeapReference(out); __ movl(out, Address(out, CodeGenerator::GetCacheOffset(load->GetStringIndex()))); + __ MaybeUnpoisonHeapReference(out); __ testl(out, out); __ j(kEqual, slow_path->GetEntryLabel()); __ Bind(slow_path->GetExitLabel()); @@ -4293,6 +4352,7 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) { } // Compare the class of `obj` with `cls`. __ movl(out, Address(obj, class_offset)); + __ MaybeUnpoisonHeapReference(out); if (cls.IsRegister()) { __ cmpl(out, cls.AsRegister<CpuRegister>()); } else { @@ -4351,13 +4411,15 @@ void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) { } // Compare the class of `obj` with `cls`. 
__ movl(temp, Address(obj, class_offset)); + __ MaybeUnpoisonHeapReference(temp); if (cls.IsRegister()) { __ cmpl(temp, cls.AsRegister<CpuRegister>()); } else { DCHECK(cls.IsStackSlot()) << cls; __ cmpl(temp, Address(CpuRegister(RSP), cls.GetStackIndex())); } - // Classes must be equal for the checkcast to succeed. + // The checkcast succeeds if the classes are equal (fast path). + // Otherwise, we need to go into the slow path to check the types. __ j(kNotEqual, slow_path->GetEntryLabel()); __ Bind(slow_path->GetExitLabel()); } @@ -4576,5 +4638,7 @@ Address CodeGeneratorX86_64::LiteralInt64Address(int64_t v) { return Address::RIP(fixup); } +#undef __ + } // namespace x86_64 } // namespace art diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc index 71fadfbcc2..b4dbf75f0a 100644 --- a/compiler/optimizing/intrinsics_arm.cc +++ b/compiler/optimizing/intrinsics_arm.cc @@ -510,6 +510,11 @@ static void GenUnsafeGet(HInvoke* invoke, if (is_volatile) { __ dmb(ISH); } + + if (type == Primitive::kPrimNot) { + Register trg = locations->Out().AsRegister<Register>(); + __ MaybeUnpoisonHeapReference(trg); + } } static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) { @@ -649,8 +654,15 @@ static void GenUnsafePut(LocationSummary* locations, __ strd(value_lo, Address(IP)); } } else { - value = locations->InAt(3).AsRegister<Register>(); - __ str(value, Address(base, offset)); + value = locations->InAt(3).AsRegister<Register>(); + Register source = value; + if (kPoisonHeapReferences && type == Primitive::kPrimNot) { + Register temp = locations->GetTemp(0).AsRegister<Register>(); + __ Mov(temp, value); + __ PoisonHeapReference(temp); + source = temp; + } + __ str(source, Address(base, offset)); } if (is_volatile) { @@ -738,6 +750,11 @@ static void GenCas(LocationSummary* locations, Primitive::Type type, CodeGenerat __ add(tmp_ptr, base, ShifterOperand(offset)); + if (kPoisonHeapReferences && type == Primitive::kPrimNot) { + codegen->GetAssembler()->PoisonHeapReference(expected_lo); + codegen->GetAssembler()->PoisonHeapReference(value_lo); + } + // do { // tmp = [r_ptr] - expected; // } while (tmp == 0 && failure([r_ptr] <- r_new_value)); @@ -761,6 +778,11 @@ static void GenCas(LocationSummary* locations, Primitive::Type type, CodeGenerat __ rsbs(out, tmp_lo, ShifterOperand(1)); __ it(CC); __ mov(out, ShifterOperand(0), CC); + + if (kPoisonHeapReferences && type == Primitive::kPrimNot) { + codegen->GetAssembler()->UnpoisonHeapReference(value_lo); + codegen->GetAssembler()->UnpoisonHeapReference(expected_lo); + } } void IntrinsicLocationsBuilderARM::VisitUnsafeCASInt(HInvoke* invoke) { @@ -1047,5 +1069,9 @@ UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar) UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent) UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck) +#undef UNIMPLEMENTED_INTRINSIC + +#undef __ + } // namespace arm } // namespace art diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc index 8bcb88b4ea..78ac167a87 100644 --- a/compiler/optimizing/intrinsics_arm64.cc +++ b/compiler/optimizing/intrinsics_arm64.cc @@ -683,6 +683,11 @@ static void GenUnsafeGet(HInvoke* invoke, } else { codegen->Load(type, trg, mem_op); } + + if (type == Primitive::kPrimNot) { + DCHECK(trg.IsW()); + codegen->GetAssembler()->MaybeUnpoisonHeapReference(trg); + } } static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) { @@ -781,22 +786,37 @@ static void GenUnsafePut(LocationSummary* locations, Register base 
= WRegisterFrom(locations->InAt(1)); // Object pointer. Register offset = XRegisterFrom(locations->InAt(2)); // Long offset. Register value = RegisterFrom(locations->InAt(3), type); + Register source = value; bool use_acquire_release = codegen->GetInstructionSetFeatures().PreferAcquireRelease(); MemOperand mem_op(base.X(), offset); - if (is_volatile || is_ordered) { - if (use_acquire_release) { - codegen->StoreRelease(type, value, mem_op); - } else { - __ Dmb(InnerShareable, BarrierAll); - codegen->Store(type, value, mem_op); - if (is_volatile) { - __ Dmb(InnerShareable, BarrierReads); + { + // We use a block to end the scratch scope before the write barrier, thus + // freeing the temporary registers so they can be used in `MarkGCCard`. + UseScratchRegisterScope temps(masm); + + if (kPoisonHeapReferences && type == Primitive::kPrimNot) { + DCHECK(value.IsW()); + Register temp = temps.AcquireW(); + __ Mov(temp.W(), value.W()); + codegen->GetAssembler()->PoisonHeapReference(temp.W()); + source = temp; + } + + if (is_volatile || is_ordered) { + if (use_acquire_release) { + codegen->StoreRelease(type, source, mem_op); + } else { + __ Dmb(InnerShareable, BarrierAll); + codegen->Store(type, source, mem_op); + if (is_volatile) { + __ Dmb(InnerShareable, BarrierReads); + } } + } else { + codegen->Store(type, source, mem_op); } - } else { - codegen->Store(type, value, mem_op); } if (type == Primitive::kPrimNot) { @@ -872,6 +892,11 @@ static void GenCas(LocationSummary* locations, Primitive::Type type, CodeGenerat __ Add(tmp_ptr, base.X(), Operand(offset)); + if (kPoisonHeapReferences && type == Primitive::kPrimNot) { + codegen->GetAssembler()->PoisonHeapReference(expected); + codegen->GetAssembler()->PoisonHeapReference(value); + } + // do { // tmp_value = [tmp_ptr] - expected; // } while (tmp_value == 0 && failure([tmp_ptr] <- r_new_value)); @@ -897,6 +922,11 @@ static void GenCas(LocationSummary* locations, Primitive::Type type, CodeGenerat } __ Bind(&exit_loop); __ Cset(out, eq); + + if (kPoisonHeapReferences && type == Primitive::kPrimNot) { + codegen->GetAssembler()->UnpoisonHeapReference(value); + codegen->GetAssembler()->UnpoisonHeapReference(expected); + } } void IntrinsicLocationsBuilderARM64::VisitUnsafeCASInt(HInvoke* invoke) { @@ -1173,5 +1203,9 @@ UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar) UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent) UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck) +#undef UNIMPLEMENTED_INTRINSIC + +#undef __ + } // namespace arm64 } // namespace art diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc index b04cc5cace..0d6ca09f31 100644 --- a/compiler/optimizing/intrinsics_x86.cc +++ b/compiler/optimizing/intrinsics_x86.cc @@ -1335,9 +1335,14 @@ static void GenUnsafeGet(LocationSummary* locations, Primitive::Type type, switch (type) { case Primitive::kPrimInt: - case Primitive::kPrimNot: - __ movl(output.AsRegister<Register>(), Address(base, offset, ScaleFactor::TIMES_1, 0)); + case Primitive::kPrimNot: { + Register output_reg = output.AsRegister<Register>(); + __ movl(output_reg, Address(base, offset, ScaleFactor::TIMES_1, 0)); + if (type == Primitive::kPrimNot) { + __ MaybeUnpoisonHeapReference(output_reg); + } break; + } case Primitive::kPrimLong: { Register output_lo = output.AsRegisterPairLow<Register>(); @@ -1436,7 +1441,7 @@ static void CreateIntIntIntIntToVoidPlusTempsLocations(ArenaAllocator* arena, locations->SetInAt(3, Location::RequiresRegister()); if (type == Primitive::kPrimNot) { // Need temp registers for 
card-marking. - locations->AddTemp(Location::RequiresRegister()); + locations->AddTemp(Location::RequiresRegister()); // Possibly used for reference poisoning too. // Ensure the value is in a byte register. locations->AddTemp(Location::RegisterLocation(ECX)); } else if (type == Primitive::kPrimLong && is_volatile) { @@ -1498,6 +1503,11 @@ static void GenUnsafePut(LocationSummary* locations, __ movl(Address(base, offset, ScaleFactor::TIMES_1, 0), value_lo); __ movl(Address(base, offset, ScaleFactor::TIMES_1, 4), value_hi); } + } else if (kPoisonHeapReferences && type == Primitive::kPrimNot) { + Register temp = locations->GetTemp(0).AsRegister<Register>(); + __ movl(temp, value_loc.AsRegister<Register>()); + __ PoisonHeapReference(temp); + __ movl(Address(base, offset, ScaleFactor::TIMES_1, 0), temp); } else { __ movl(Address(base, offset, ScaleFactor::TIMES_1, 0), value_loc.AsRegister<Register>()); } @@ -1604,7 +1614,8 @@ static void GenCAS(Primitive::Type type, HInvoke* invoke, CodeGeneratorX86* code __ LockCmpxchg8b(Address(base, offset, TIMES_1, 0)); } else { // Integer or object. - DCHECK_EQ(locations->InAt(3).AsRegister<Register>(), EAX); + Register expected = locations->InAt(3).AsRegister<Register>(); + DCHECK_EQ(expected, EAX); Register value = locations->InAt(4).AsRegister<Register>(); if (type == Primitive::kPrimNot) { // Mark card for object assuming new value is stored. @@ -1614,6 +1625,11 @@ static void GenCAS(Primitive::Type type, HInvoke* invoke, CodeGeneratorX86* code base, value, value_can_be_null); + + if (kPoisonHeapReferences) { + __ PoisonHeapReference(expected); + __ PoisonHeapReference(value); + } } __ LockCmpxchgl(Address(base, offset, TIMES_1, 0), value); @@ -1625,6 +1641,13 @@ static void GenCAS(Primitive::Type type, HInvoke* invoke, CodeGeneratorX86* code // Convert ZF into the boolean result. __ setb(kZero, out.AsRegister<Register>()); __ movzxb(out.AsRegister<Register>(), out.AsRegister<ByteRegister>()); + + if (kPoisonHeapReferences && type == Primitive::kPrimNot) { + Register value = locations->InAt(4).AsRegister<Register>(); + __ UnpoisonHeapReference(value); + // Do not unpoison the reference contained in register `expected`, + // as it is the same as register `out`. + } } void IntrinsicCodeGeneratorX86::VisitUnsafeCASInt(HInvoke* invoke) { @@ -1734,5 +1757,9 @@ UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck) UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar) UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent) +#undef UNIMPLEMENTED_INTRINSIC + +#undef __ + } // namespace x86 } // namespace art diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc index 888c7b8037..ea342e9382 100644 --- a/compiler/optimizing/intrinsics_x86_64.cc +++ b/compiler/optimizing/intrinsics_x86_64.cc @@ -1251,6 +1251,9 @@ static void GenUnsafeGet(LocationSummary* locations, Primitive::Type type, case Primitive::kPrimInt: case Primitive::kPrimNot: __ movl(trg, Address(base, offset, ScaleFactor::TIMES_1, 0)); + if (type == Primitive::kPrimNot) { + __ MaybeUnpoisonHeapReference(trg); + } break; case Primitive::kPrimLong: @@ -1325,7 +1328,7 @@ static void CreateIntIntIntIntToVoidPlusTempsLocations(ArenaAllocator* arena, locations->SetInAt(3, Location::RequiresRegister()); if (type == Primitive::kPrimNot) { // Need temp registers for card-marking. - locations->AddTemp(Location::RequiresRegister()); + locations->AddTemp(Location::RequiresRegister()); // Possibly used for reference poisoning too. 
locations->AddTemp(Location::RequiresRegister()); } } @@ -1369,6 +1372,11 @@ static void GenUnsafePut(LocationSummary* locations, Primitive::Type type, bool if (type == Primitive::kPrimLong) { __ movq(Address(base, offset, ScaleFactor::TIMES_1, 0), value); + } else if (kPoisonHeapReferences && type == Primitive::kPrimNot) { + CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>(); + __ movl(temp, value); + __ PoisonHeapReference(temp); + __ movl(Address(base, offset, ScaleFactor::TIMES_1, 0), temp); } else { __ movl(Address(base, offset, ScaleFactor::TIMES_1, 0), value); } @@ -1471,6 +1479,11 @@ static void GenCAS(Primitive::Type type, HInvoke* invoke, CodeGeneratorX86_64* c base, value, value_can_be_null); + + if (kPoisonHeapReferences) { + __ PoisonHeapReference(expected); + __ PoisonHeapReference(value); + } } __ LockCmpxchgl(Address(base, offset, TIMES_1, 0), value); @@ -1482,6 +1495,11 @@ static void GenCAS(Primitive::Type type, HInvoke* invoke, CodeGeneratorX86_64* c // Convert ZF into the boolean result. __ setcc(kZero, out); __ movzxb(out, out); + + if (kPoisonHeapReferences && type == Primitive::kPrimNot) { + __ UnpoisonHeapReference(value); + __ UnpoisonHeapReference(expected); + } } void IntrinsicCodeGeneratorX86_64::VisitUnsafeCASInt(HInvoke* invoke) { @@ -1598,5 +1616,9 @@ UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck) UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar) UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent) +#undef UNIMPLEMENTED_INTRINSIC + +#undef __ + } // namespace x86_64 } // namespace art diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc index 0086fe8c49..09d22703fe 100644 --- a/compiler/utils/arm/assembler_arm.cc +++ b/compiler/utils/arm/assembler_arm.cc @@ -529,13 +529,13 @@ void ArmAssembler::CopyRef(FrameOffset dest, FrameOffset src, } void ArmAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs, - bool poison_reference) { + bool unpoison_reference) { ArmManagedRegister dst = mdest.AsArm(); CHECK(dst.IsCoreRegister() && dst.IsCoreRegister()) << dst; LoadFromOffset(kLoadWord, dst.AsCoreRegister(), base.AsArm().AsCoreRegister(), offs.Int32Value()); - if (kPoisonHeapReferences && poison_reference) { - rsb(dst.AsCoreRegister(), dst.AsCoreRegister(), ShifterOperand(0)); + if (unpoison_reference) { + MaybeUnpoisonHeapReference(dst.AsCoreRegister()); } } diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h index f8ca48ef57..5d85d11054 100644 --- a/compiler/utils/arm/assembler_arm.h +++ b/compiler/utils/arm/assembler_arm.h @@ -774,7 +774,7 @@ class ArmAssembler : public Assembler { void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE; void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs, - bool poison_reference) OVERRIDE; + bool unpoison_reference) OVERRIDE; void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE; @@ -857,6 +857,27 @@ class ArmAssembler : public Assembler { return r >= R8; } + // + // Heap poisoning. + // + + // Poison a heap reference contained in `reg`. + void PoisonHeapReference(Register reg) { + // reg = -reg. + rsb(reg, reg, ShifterOperand(0)); + } + // Unpoison a heap reference contained in `reg`. + void UnpoisonHeapReference(Register reg) { + // reg = -reg. + rsb(reg, reg, ShifterOperand(0)); + } + // Unpoison a heap reference contained in `reg` if heap poisoning is enabled. 
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
index 0086fe8c49..09d22703fe 100644
--- a/compiler/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -529,13 +529,13 @@ void ArmAssembler::CopyRef(FrameOffset dest, FrameOffset src,
 }

 void ArmAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
-                           bool poison_reference) {
+                           bool unpoison_reference) {
   ArmManagedRegister dst = mdest.AsArm();
   CHECK(dst.IsCoreRegister() && dst.IsCoreRegister()) << dst;
   LoadFromOffset(kLoadWord, dst.AsCoreRegister(),
                  base.AsArm().AsCoreRegister(), offs.Int32Value());
-  if (kPoisonHeapReferences && poison_reference) {
-    rsb(dst.AsCoreRegister(), dst.AsCoreRegister(), ShifterOperand(0));
+  if (unpoison_reference) {
+    MaybeUnpoisonHeapReference(dst.AsCoreRegister());
   }
 }
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index f8ca48ef57..5d85d11054 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -774,7 +774,7 @@ class ArmAssembler : public Assembler {
   void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;

   void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
-               bool poison_reference) OVERRIDE;
+               bool unpoison_reference) OVERRIDE;

   void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;

@@ -857,6 +857,27 @@ class ArmAssembler : public Assembler {
     return r >= R8;
   }

+  //
+  // Heap poisoning.
+  //
+
+  // Poison a heap reference contained in `reg`.
+  void PoisonHeapReference(Register reg) {
+    // reg = -reg.
+    rsb(reg, reg, ShifterOperand(0));
+  }
+  // Unpoison a heap reference contained in `reg`.
+  void UnpoisonHeapReference(Register reg) {
+    // reg = -reg.
+    rsb(reg, reg, ShifterOperand(0));
+  }
+  // Unpoison a heap reference contained in `reg` if heap poisoning is enabled.
+  void MaybeUnpoisonHeapReference(Register reg) {
+    if (kPoisonHeapReferences) {
+      UnpoisonHeapReference(reg);
+    }
+  }
+
  protected:
   // Returns whether or not the given register is used for passing parameters.
   static int RegisterCompare(const Register* reg1, const Register* reg2) {
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index 077579c882..0e17512041 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -298,15 +298,15 @@ void Arm64Assembler::LoadRef(ManagedRegister m_dst, FrameOffset offs) {
 }

 void Arm64Assembler::LoadRef(ManagedRegister m_dst, ManagedRegister m_base, MemberOffset offs,
-                             bool poison_reference) {
+                             bool unpoison_reference) {
   Arm64ManagedRegister dst = m_dst.AsArm64();
   Arm64ManagedRegister base = m_base.AsArm64();
   CHECK(dst.IsXRegister() && base.IsXRegister());
   LoadWFromOffset(kLoadWord, dst.AsOverlappingWRegister(), base.AsXRegister(),
                   offs.Int32Value());
-  if (kPoisonHeapReferences && poison_reference) {
+  if (unpoison_reference) {
     WRegister ref_reg = dst.AsOverlappingWRegister();
-    ___ Neg(reg_w(ref_reg), vixl::Operand(reg_w(ref_reg)));
+    MaybeUnpoisonHeapReference(reg_w(ref_reg));
   }
 }

@@ -784,5 +784,25 @@ void Arm64Assembler::RemoveFrame(size_t frame_size,
   cfi_.DefCFAOffset(frame_size);
 }

+void Arm64Assembler::PoisonHeapReference(vixl::Register reg) {
+  DCHECK(reg.IsW());
+  // reg = -reg.
+  ___ Neg(reg, vixl::Operand(reg));
+}
+
+void Arm64Assembler::UnpoisonHeapReference(vixl::Register reg) {
+  DCHECK(reg.IsW());
+  // reg = -reg.
+  ___ Neg(reg, vixl::Operand(reg));
+}
+
+void Arm64Assembler::MaybeUnpoisonHeapReference(vixl::Register reg) {
+  if (kPoisonHeapReferences) {
+    UnpoisonHeapReference(reg);
+  }
+}
+
+#undef ___
+
 }  // namespace arm64
 }  // namespace art
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index db95537f93..05882a30b0 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -10,7 +10,7 @@
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
+ * See the License for the specific language governing permissions and
  * limitations under the License.
  */
@@ -116,7 +116,7 @@ class Arm64Assembler FINAL : public Assembler {
   void LoadFromThread64(ManagedRegister dest, ThreadOffset<8> src, size_t size) OVERRIDE;
   void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
   void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
-               bool poison_reference) OVERRIDE;
+               bool unpoison_reference) OVERRIDE;
   void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
   void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset<8> offs) OVERRIDE;

@@ -182,6 +182,17 @@ class Arm64Assembler FINAL : public Assembler {
   // and branch to a ExceptionSlowPath if it is.
   void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;

+  //
+  // Heap poisoning.
+  //
+
+  // Poison a heap reference contained in `reg`.
+  void PoisonHeapReference(vixl::Register reg);
+  // Unpoison a heap reference contained in `reg`.
+  void UnpoisonHeapReference(vixl::Register reg);
+  // Unpoison a heap reference contained in `reg` if heap poisoning is enabled.
+  void MaybeUnpoisonHeapReference(vixl::Register reg);
+
  private:
   static vixl::Register reg_x(int code) {
     CHECK(code < kNumberOfXRegisters) << code;
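On ARM and ARM64 the helpers above make the symmetry explicit: PoisonHeapReference and UnpoisonHeapReference emit the identical rsb (reverse subtract from zero) or Neg instruction, since negation undoes itself, and MaybeUnpoisonHeapReference compiles to nothing when poisoning is off. A small sketch of that compile-time gating, modeling kPoisonHeapReferences as an ordinary constexpr flag:

    #include <cstdint>

    constexpr bool kPoisonHeapReferences = false;  // illustrative; ART fixes this at build time

    inline uint32_t UnpoisonHeapReference(uint32_t ref) { return 0u - ref; }  // rsb/Neg

    inline uint32_t MaybeUnpoisonHeapReference(uint32_t ref) {
      // The guard is a compile-time constant, so builds without poisoning
      // pay no cost, matching the Maybe* helpers added to each assembler.
      return kPoisonHeapReferences ? UnpoisonHeapReference(ref) : ref;
    }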
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index ee2d594e6f..3097cd55c0 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -441,9 +441,9 @@ class Assembler {
   virtual void LoadFromThread64(ManagedRegister dest, ThreadOffset<8> src, size_t size);

   virtual void LoadRef(ManagedRegister dest, FrameOffset src) = 0;
-  // If poison_reference is true and kPoisonReference is true, then we negate the read reference.
+  // If unpoison_reference is true and kPoisonHeapReferences is true, then we negate the read reference.
   virtual void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
-                       bool poison_reference) = 0;
+                       bool unpoison_reference) = 0;

   virtual void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) = 0;

diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index e55b461127..c09dfcce4f 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -697,12 +697,12 @@ void MipsAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
 }

 void MipsAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
-                            bool poison_reference) {
+                            bool unpoison_reference) {
   MipsManagedRegister dest = mdest.AsMips();
   CHECK(dest.IsCoreRegister() && dest.IsCoreRegister());
   LoadFromOffset(kLoadWord, dest.AsCoreRegister(),
                  base.AsMips().AsCoreRegister(), offs.Int32Value());
-  if (kPoisonHeapReferences && poison_reference) {
+  if (kPoisonHeapReferences && unpoison_reference) {
     Subu(dest.AsCoreRegister(), ZERO, dest.AsCoreRegister());
   }
 }
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 7b0fc39d17..0d1b82ce7b 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -192,7 +192,7 @@ class MipsAssembler FINAL : public Assembler {
   void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;

   void LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
-               bool poison_reference) OVERRIDE;
+               bool unpoison_reference) OVERRIDE;

   void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) OVERRIDE;

diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index 3333cd22bd..24ea9e25db 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -1242,12 +1242,12 @@ void Mips64Assembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
 }

 void Mips64Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
-                              bool poison_reference) {
+                              bool unpoison_reference) {
   Mips64ManagedRegister dest = mdest.AsMips64();
   CHECK(dest.IsGpuRegister() && base.AsMips64().IsGpuRegister());
   LoadFromOffset(kLoadUnsignedWord, dest.AsGpuRegister(),
                  base.AsMips64().AsGpuRegister(), offs.Int32Value());
-  if (kPoisonHeapReferences && poison_reference) {
+  if (kPoisonHeapReferences && unpoison_reference) {
     // TODO: review
     // Negate the 32-bit ref
     Dsubu(dest.AsGpuRegister(), ZERO, dest.AsGpuRegister());
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index 88cc4bcd00..47b146a28c 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -265,7 +265,7 @@ class Mips64Assembler FINAL : public Assembler {
   void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;

   void LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
-               bool poison_reference) OVERRIDE;
+               bool unpoison_reference) OVERRIDE;

   void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) OVERRIDE;

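The rename from poison_reference to unpoison_reference running through these assemblers also corrects the parameter's meaning: a load never poisons anything, it may only need to unpoison the word it just read. A rough model of that contract under the same assumptions as the sketches above (the MIPS back ends keep an explicit kPoisonHeapReferences check instead of calling a Maybe* helper, but the effect is the same):

    #include <cstdint>

    constexpr bool kPoisonHeapReferences = true;  // illustrative build setting

    inline uint32_t UnpoisonHeapReference(uint32_t ref) { return 0u - ref; }

    // Callers pass unpoison_reference = true only when the loaded word is
    // a heap reference that may have been stored poisoned; raw pointers
    // and words known to be unpoisoned pass false and skip the negation.
    uint32_t LoadRef(const uint32_t* slot, bool unpoison_reference) {
      uint32_t ref = *slot;  // lw/ldr/movl from [base + offs]
      if (kPoisonHeapReferences && unpoison_reference) {
        ref = UnpoisonHeapReference(ref);  // Subu/Dsubu from ZERO on MIPS
      }
      return ref;
    }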
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 390d46ede4..fa85ada864 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -1910,12 +1910,12 @@ void X86Assembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
 }

 void X86Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
-                           bool poison_reference) {
+                           bool unpoison_reference) {
   X86ManagedRegister dest = mdest.AsX86();
   CHECK(dest.IsCpuRegister() && dest.IsCpuRegister());
   movl(dest.AsCpuRegister(), Address(base.AsX86().AsCpuRegister(), offs));
-  if (kPoisonHeapReferences && poison_reference) {
-    negl(dest.AsCpuRegister());
+  if (unpoison_reference) {
+    MaybeUnpoisonHeapReference(dest.AsCpuRegister());
   }
 }

diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 1c1c023711..d1b4e1dc5f 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -541,7 +541,7 @@ class X86Assembler FINAL : public Assembler {
   void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;

   void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
-               bool poison_reference) OVERRIDE;
+               bool unpoison_reference) OVERRIDE;

   void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;

@@ -616,6 +616,21 @@ class X86Assembler FINAL : public Assembler {
   // and branch to a ExceptionSlowPath if it is.
   void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;

+  //
+  // Heap poisoning.
+  //
+
+  // Poison a heap reference contained in `reg`.
+  void PoisonHeapReference(Register reg) { negl(reg); }
+  // Unpoison a heap reference contained in `reg`.
+  void UnpoisonHeapReference(Register reg) { negl(reg); }
+  // Unpoison a heap reference contained in `reg` if heap poisoning is enabled.
+  void MaybeUnpoisonHeapReference(Register reg) {
+    if (kPoisonHeapReferences) {
+      UnpoisonHeapReference(reg);
+    }
+  }
+
 private:
  inline void EmitUint8(uint8_t value);
  inline void EmitInt32(int32_t value);
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index ac95c7127a..f35f51c494 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -2597,12 +2597,12 @@ void X86_64Assembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
 }

 void X86_64Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
-                              bool poison_reference) {
+                              bool unpoison_reference) {
   X86_64ManagedRegister dest = mdest.AsX86_64();
   CHECK(dest.IsCpuRegister() && dest.IsCpuRegister());
   movl(dest.AsCpuRegister(), Address(base.AsX86_64().AsCpuRegister(), offs));
-  if (kPoisonHeapReferences && poison_reference) {
-    negl(dest.AsCpuRegister());
+  if (unpoison_reference) {
+    MaybeUnpoisonHeapReference(dest.AsCpuRegister());
   }
 }

diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 6b2b65d6c1..61ffeab1e8 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -669,7 +669,7 @@ class X86_64Assembler FINAL : public Assembler {
   void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;

   void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
-               bool poison_reference) OVERRIDE;
+               bool unpoison_reference) OVERRIDE;

   void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;

@@ -767,6 +767,21 @@ class X86_64Assembler FINAL : public Assembler {
   // Is the constant area empty? Return true if there are no literals in the constant area.
   bool IsConstantAreaEmpty() const { return constant_area_.GetSize() == 0; }

+  //
+  // Heap poisoning.
+  //
+
+  // Poison a heap reference contained in `reg`.
+  void PoisonHeapReference(CpuRegister reg) { negl(reg); }
+  // Unpoison a heap reference contained in `reg`.
+  void UnpoisonHeapReference(CpuRegister reg) { negl(reg); }
+  // Unpoison a heap reference contained in `reg` if heap poisoning is enabled.
+  void MaybeUnpoisonHeapReference(CpuRegister reg) {
+    if (kPoisonHeapReferences) {
+      UnpoisonHeapReference(reg);
+    }
+  }
+
 private:
  void EmitUint8(uint8_t value);
  void EmitInt32(int32_t value);
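A final width detail: ART heap references are 32-bit values even on 64-bit targets, which is why the x86-64 helpers above use negl (a 32-bit negate) on a CpuRegister rather than negq, and why the MIPS64 change is commented "Negate the 32-bit ref". A short illustrative sketch of that rule:

    #include <cstdint>

    // Negate only the low 32 bits, as negl does; a 32-bit operation on
    // x86-64 also zeroes the upper half of the destination register.
    inline uint64_t PoisonHeapReference32(uint64_t reg) {
      const uint32_t ref = static_cast<uint32_t>(reg);  // the reference occupies the low 32 bits
      return 0u - ref;  // zero-extends back to 64 bits, like negl's result
    }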