Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/code_generator_arm.cc       | 58
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc     | 58
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc  | 15
-rw-r--r--  compiler/optimizing/code_generator_mips.cc      | 14
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc    | 15
-rw-r--r--  compiler/optimizing/code_generator_x86.cc       | 52
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc    | 52
7 files changed, 155 insertions, 109 deletions
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 046c2d8b87..c6363d1708 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -489,14 +489,6 @@ class TypeCheckSlowPathARM : public SlowPathCodeARM {
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
-    Location arg0, arg1;
-    if (instruction_->IsInstanceOf()) {
-      arg0 = locations->InAt(1);
-      arg1 = locations->Out();
-    } else {
-      arg0 = locations->InAt(0);
-      arg1 = locations->InAt(1);
-    }
     DCHECK(instruction_->IsCheckCast()
            || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -510,10 +502,10 @@ class TypeCheckSlowPathARM : public SlowPathCodeARM {
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
     InvokeRuntimeCallingConvention calling_convention;
-    codegen->EmitParallelMoves(arg0,
+    codegen->EmitParallelMoves(locations->InAt(0),
                                Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                                Primitive::kPrimNot,
-                               arg1,
+                               locations->InAt(1),
                                Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
                                Primitive::kPrimNot);
     if (instruction_->IsInstanceOf()) {
@@ -521,7 +513,7 @@ class TypeCheckSlowPathARM : public SlowPathCodeARM {
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
-      CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*>();
+      CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Object*, mirror::Class*>();
       arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
     } else {
       DCHECK(instruction_->IsCheckCast());
@@ -6114,16 +6106,15 @@ void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
     __ CompareAndBranchIfZero(obj, &zero);
   }
 
-  // /* HeapReference<Class> */ out = obj->klass_
-  GenerateReferenceLoadTwoRegisters(instruction,
-                                    out_loc,
-                                    obj_loc,
-                                    class_offset,
-                                    maybe_temp_loc,
-                                    kCompilerReadBarrierOption);
-
   switch (type_check_kind) {
     case TypeCheckKind::kExactCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp_loc,
+                                        kCompilerReadBarrierOption);
       __ cmp(out, ShifterOperand(cls));
       // Classes must be equal for the instanceof to succeed.
       __ b(&zero, NE);
@@ -6133,6 +6124,13 @@ void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
     }
 
     case TypeCheckKind::kAbstractClassCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp_loc,
+                                        kCompilerReadBarrierOption);
       // If the class is abstract, we eagerly fetch the super class of the
       // object to avoid doing a comparison we know will fail.
       Label loop;
@@ -6155,6 +6153,13 @@ void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
     }
 
     case TypeCheckKind::kClassHierarchyCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp_loc,
+                                        kCompilerReadBarrierOption);
       // Walk over the class hierarchy to find a match.
       Label loop, success;
       __ Bind(&loop);
@@ -6178,6 +6183,13 @@ void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
     }
 
     case TypeCheckKind::kArrayObjectCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp_loc,
+                                        kCompilerReadBarrierOption);
       // Do an exact check.
       Label exact_check;
       __ cmp(out, ShifterOperand(cls));
@@ -6201,6 +6213,14 @@ void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
     }
 
     case TypeCheckKind::kArrayCheck: {
+      // No read barrier since the slow path will retry upon failure.
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp_loc,
+                                        kWithoutReadBarrier);
       __ cmp(out, ShifterOperand(cls));
       DCHECK(locations->OnlyCallsOnSlowPath());
       slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(instruction,
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index a085fea0f8..4ab6065819 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -459,14 +459,6 @@ class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
-    Location arg0, arg1;
-    if (instruction_->IsInstanceOf()) {
-      arg0 = locations->InAt(1);
-      arg1 = locations->Out();
-    } else {
-      arg0 = locations->InAt(0);
-      arg1 = locations->InAt(1);
-    }
     DCHECK(instruction_->IsCheckCast()
            || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -482,15 +474,15 @@ class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
     InvokeRuntimeCallingConvention calling_convention;
-    codegen->EmitParallelMoves(arg0,
+    codegen->EmitParallelMoves(locations->InAt(0),
                                LocationFrom(calling_convention.GetRegisterAt(0)),
                                Primitive::kPrimNot,
-                               arg1,
+                               locations->InAt(1),
                                LocationFrom(calling_convention.GetRegisterAt(1)),
                                Primitive::kPrimNot);
     if (instruction_->IsInstanceOf()) {
       arm64_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, dex_pc, this);
-      CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*>();
+      CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Object*, mirror::Class*>();
       Primitive::Type ret_type = instruction_->GetType();
       Location ret_loc = calling_convention.GetReturnLocation(ret_type);
       arm64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
@@ -3427,16 +3419,15 @@ void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
     __ Cbz(obj, &zero);
   }
 
-  // /* HeapReference<Class> */ out = obj->klass_
-  GenerateReferenceLoadTwoRegisters(instruction,
-                                    out_loc,
-                                    obj_loc,
-                                    class_offset,
-                                    maybe_temp_loc,
-                                    kCompilerReadBarrierOption);
-
   switch (type_check_kind) {
     case TypeCheckKind::kExactCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp_loc,
+                                        kCompilerReadBarrierOption);
       __ Cmp(out, cls);
       __ Cset(out, eq);
       if (zero.IsLinked()) {
@@ -3446,6 +3437,13 @@ void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
     }
 
     case TypeCheckKind::kAbstractClassCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp_loc,
+                                        kCompilerReadBarrierOption);
       // If the class is abstract, we eagerly fetch the super class of the
       // object to avoid doing a comparison we know will fail.
       vixl::aarch64::Label loop, success;
@@ -3468,6 +3466,13 @@ void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
     }
 
     case TypeCheckKind::kClassHierarchyCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp_loc,
+                                        kCompilerReadBarrierOption);
       // Walk over the class hierarchy to find a match.
       vixl::aarch64::Label loop, success;
       __ Bind(&loop);
@@ -3491,6 +3496,13 @@ void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
     }
 
     case TypeCheckKind::kArrayObjectCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp_loc,
+                                        kCompilerReadBarrierOption);
       // Do an exact check.
       vixl::aarch64::Label exact_check;
       __ Cmp(out, cls);
@@ -3514,6 +3526,14 @@ void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
     }
 
     case TypeCheckKind::kArrayCheck: {
+      // No read barrier since the slow path will retry upon failure.
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp_loc,
+                                        kWithoutReadBarrier);
       __ Cmp(out, cls);
       DCHECK(locations->OnlyCallsOnSlowPath());
       slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(instruction,
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index f7957d402f..e9827e8620 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -444,14 +444,6 @@ class TypeCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
-    Location arg0, arg1;
-    if (instruction_->IsInstanceOf()) {
-      arg0 = locations->InAt(1);
-      arg1 = locations->Out();
-    } else {
-      arg0 = locations->InAt(0);
-      arg1 = locations->InAt(1);
-    }
     DCHECK(instruction_->IsCheckCast()
            || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -466,10 +458,10 @@ class TypeCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
     // move resolver.
     InvokeRuntimeCallingConventionARMVIXL calling_convention;
-    codegen->EmitParallelMoves(arg0,
+    codegen->EmitParallelMoves(locations->InAt(0),
                                LocationFrom(calling_convention.GetRegisterAt(0)),
                                Primitive::kPrimNot,
-                               arg1,
+                               locations->InAt(1),
                                LocationFrom(calling_convention.GetRegisterAt(1)),
                                Primitive::kPrimNot);
     if (instruction_->IsInstanceOf()) {
@@ -477,8 +469,7 @@ class TypeCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
-      CheckEntrypointTypes<
-          kQuickInstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*>();
+      CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Object*, mirror::Class*>();
       arm_codegen->Move32(locations->Out(), LocationFrom(r0));
     } else {
       DCHECK(instruction_->IsCheckCast());
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index f169eb00f3..573bb507f2 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -378,14 +378,6 @@ class TypeCheckSlowPathMIPS : public SlowPathCodeMIPS {
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
-    Location arg0, arg1;
-    if (instruction_->IsInstanceOf()) {
-      arg0 = locations->InAt(1);
-      arg1 = locations->Out();
-    } else {
-      arg0 = locations->InAt(0);
-      arg1 = locations->InAt(1);
-    }
     uint32_t dex_pc = instruction_->GetDexPc();
     DCHECK(instruction_->IsCheckCast()
            || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -397,15 +389,15 @@ class TypeCheckSlowPathMIPS : public SlowPathCodeMIPS {
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
     InvokeRuntimeCallingConvention calling_convention;
-    codegen->EmitParallelMoves(arg0,
+    codegen->EmitParallelMoves(locations->InAt(0),
                                Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                                Primitive::kPrimNot,
-                               arg1,
+                               locations->InAt(1),
                                Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
                                Primitive::kPrimNot);
     if (instruction_->IsInstanceOf()) {
       mips_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, dex_pc, this);
-      CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*>();
+      CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Object*, mirror::Class*>();
       Primitive::Type ret_type = instruction_->GetType();
       Location ret_loc = calling_convention.GetReturnLocation(ret_type);
       mips_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 7598740d3c..1a54935a25 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -322,14 +322,6 @@ class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
-    Location arg0, arg1;
-    if (instruction_->IsInstanceOf()) {
-      arg0 = locations->InAt(1);
-      arg1 = locations->Out();
-    } else {
-      arg0 = locations->InAt(0);
-      arg1 = locations->InAt(1);
-    }
     uint32_t dex_pc = instruction_->GetDexPc();
     DCHECK(instruction_->IsCheckCast()
@@ -342,16 +334,15 @@ class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
     InvokeRuntimeCallingConvention calling_convention;
-    codegen->EmitParallelMoves(arg0,
+    codegen->EmitParallelMoves(locations->InAt(0),
                                Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                                Primitive::kPrimNot,
-                               arg1,
+                               locations->InAt(1),
                                Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
                                Primitive::kPrimNot);
     if (instruction_->IsInstanceOf()) {
       mips64_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, dex_pc, this);
-      CheckEntrypointTypes<
-          kQuickInstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*>();
+      CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Object*, mirror::Class*>();
       Primitive::Type ret_type = instruction_->GetType();
       Location ret_loc = calling_convention.GetReturnLocation(ret_type);
       mips64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 45edff8767..2451b8d247 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -312,14 +312,6 @@ class TypeCheckSlowPathX86 : public SlowPathCode {
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
-    Location arg0, arg1;
-    if (instruction_->IsInstanceOf()) {
-      arg0 = locations->InAt(1);
-      arg1 = locations->Out();
-    } else {
-      arg0 = locations->InAt(0);
-      arg1 = locations->InAt(1);
-    }
     DCHECK(instruction_->IsCheckCast()
            || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -333,10 +325,10 @@ class TypeCheckSlowPathX86 : public SlowPathCode {
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
     InvokeRuntimeCallingConvention calling_convention;
-    x86_codegen->EmitParallelMoves(arg0,
+    x86_codegen->EmitParallelMoves(locations->InAt(0),
                                    Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                                    Primitive::kPrimNot,
-                                   arg1,
+                                   locations->InAt(1),
                                    Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
                                    Primitive::kPrimNot);
     if (instruction_->IsInstanceOf()) {
@@ -344,7 +336,7 @@ class TypeCheckSlowPathX86 : public SlowPathCode {
                                instruction_,
                                instruction_->GetDexPc(),
                                this);
-      CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*>();
+      CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Object*, mirror::Class*>();
     } else {
       DCHECK(instruction_->IsCheckCast());
       x86_codegen->InvokeRuntime(kQuickCheckInstanceOf,
@@ -6422,15 +6414,14 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
     __ j(kEqual, &zero);
   }
 
-  // /* HeapReference<Class> */ out = obj->klass_
-  GenerateReferenceLoadTwoRegisters(instruction,
-                                    out_loc,
-                                    obj_loc,
-                                    class_offset,
-                                    kCompilerReadBarrierOption);
-
   switch (type_check_kind) {
     case TypeCheckKind::kExactCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        kCompilerReadBarrierOption);
       if (cls.IsRegister()) {
         __ cmpl(out, cls.AsRegister<Register>());
       } else {
@@ -6446,6 +6437,12 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
     }
 
     case TypeCheckKind::kAbstractClassCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        kCompilerReadBarrierOption);
       // If the class is abstract, we eagerly fetch the super class of the
       // object to avoid doing a comparison we know will fail.
       NearLabel loop;
@@ -6474,6 +6471,12 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
     }
 
    case TypeCheckKind::kClassHierarchyCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        kCompilerReadBarrierOption);
       // Walk over the class hierarchy to find a match.
       NearLabel loop, success;
       __ Bind(&loop);
@@ -6503,6 +6506,12 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
     }
 
     case TypeCheckKind::kArrayObjectCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        kCompilerReadBarrierOption);
       // Do an exact check.
       NearLabel exact_check;
       if (cls.IsRegister()) {
@@ -6531,6 +6540,13 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
     }
 
     case TypeCheckKind::kArrayCheck: {
+      // No read barrier since the slow path will retry upon failure.
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        kWithoutReadBarrier);
       if (cls.IsRegister()) {
         __ cmpl(out, cls.AsRegister<Register>());
       } else {
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index f7a2f40f33..2425a4c3cb 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -332,14 +332,6 @@ class TypeCheckSlowPathX86_64 : public SlowPathCode {
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
-    Location arg0, arg1;
-    if (instruction_->IsInstanceOf()) {
-      arg0 = locations->InAt(1);
-      arg1 = locations->Out();
-    } else {
-      arg0 = locations->InAt(0);
-      arg1 = locations->InAt(1);
-    }
     uint32_t dex_pc = instruction_->GetDexPc();
     DCHECK(instruction_->IsCheckCast()
            || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -354,15 +346,15 @@ class TypeCheckSlowPathX86_64 : public SlowPathCode {
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
     InvokeRuntimeCallingConvention calling_convention;
-    codegen->EmitParallelMoves(arg0,
+    codegen->EmitParallelMoves(locations->InAt(0),
                                Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                                Primitive::kPrimNot,
-                               arg1,
+                               locations->InAt(1),
                                Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
                                Primitive::kPrimNot);
     if (instruction_->IsInstanceOf()) {
       x86_64_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, dex_pc, this);
-      CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*>();
+      CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Object*, mirror::Class*>();
     } else {
       DCHECK(instruction_->IsCheckCast());
       x86_64_codegen->InvokeRuntime(kQuickCheckInstanceOf, instruction_, dex_pc, this);
@@ -5826,15 +5818,14 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
     __ j(kEqual, &zero);
   }
 
-  // /* HeapReference<Class> */ out = obj->klass_
-  GenerateReferenceLoadTwoRegisters(instruction,
-                                    out_loc,
-                                    obj_loc,
-                                    class_offset,
-                                    kCompilerReadBarrierOption);
-
   switch (type_check_kind) {
     case TypeCheckKind::kExactCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        kCompilerReadBarrierOption);
       if (cls.IsRegister()) {
         __ cmpl(out, cls.AsRegister<CpuRegister>());
       } else {
@@ -5855,6 +5846,12 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
     }
 
     case TypeCheckKind::kAbstractClassCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        kCompilerReadBarrierOption);
       // If the class is abstract, we eagerly fetch the super class of the
       // object to avoid doing a comparison we know will fail.
       NearLabel loop, success;
@@ -5883,6 +5880,12 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
     }
 
    case TypeCheckKind::kClassHierarchyCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        kCompilerReadBarrierOption);
       // Walk over the class hierarchy to find a match.
       NearLabel loop, success;
       __ Bind(&loop);
@@ -5912,6 +5915,12 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
     }
 
     case TypeCheckKind::kArrayObjectCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        kCompilerReadBarrierOption);
       // Do an exact check.
       NearLabel exact_check;
       if (cls.IsRegister()) {
@@ -5940,6 +5949,13 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
     }
 
    case TypeCheckKind::kArrayCheck: {
+      // No read barrier since the slow path will retry upon failure.
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        kWithoutReadBarrier);
       if (cls.IsRegister()) {
         __ cmpl(out, cls.AsRegister<CpuRegister>());
       } else {