| -rw-r--r-- | compiler/optimizing/code_generator_arm.cc | 58 |
| -rw-r--r-- | compiler/optimizing/code_generator_arm64.cc | 58 |
| -rw-r--r-- | compiler/optimizing/code_generator_arm_vixl.cc | 15 |
| -rw-r--r-- | compiler/optimizing/code_generator_mips.cc | 14 |
| -rw-r--r-- | compiler/optimizing/code_generator_mips64.cc | 15 |
| -rw-r--r-- | compiler/optimizing/code_generator_x86.cc | 52 |
| -rw-r--r-- | compiler/optimizing/code_generator_x86_64.cc | 52 |
| -rw-r--r-- | runtime/arch/arm/entrypoints_init_arm.cc | 4 |
| -rw-r--r-- | runtime/arch/arm64/entrypoints_init_arm64.cc | 4 |
| -rw-r--r-- | runtime/arch/mips/entrypoints_init_mips.cc | 4 |
| -rw-r--r-- | runtime/arch/mips64/entrypoints_init_mips64.cc | 4 |
| -rw-r--r-- | runtime/arch/x86/entrypoints_init_x86.cc | 4 |
| -rw-r--r-- | runtime/arch/x86/quick_entrypoints_x86.S | 6 |
| -rw-r--r-- | runtime/arch/x86_64/entrypoints_init_x86_64.cc | 4 |
| -rw-r--r-- | runtime/arch/x86_64/quick_entrypoints_x86_64.S | 6 |
| -rw-r--r-- | runtime/entrypoints/quick/quick_entrypoints_list.h | 2 |
| -rw-r--r-- | runtime/oat.h | 2 |
17 files changed, 175 insertions, 129 deletions
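The diff below switches the kQuickInstanceofNonTrivial entrypoint from a class-to-class assignability test (artIsAssignableFromCode) to an object-to-class test (artInstanceOfFromCode), so each instanceof fast path now loads obj->klass_ only in the branch that needs it and the slow path passes the object itself. As a rough sketch of the runtime side (not part of this diff; only the signature comes from the declarations below, and the body is an assumption built on the existing mirror::Object::InstanceOf helper):

// Hypothetical sketch of the new entrypoint's behavior; the definition is not
// shown in this diff, so treat the body as an illustration only.
extern "C" size_t artInstanceOfFromCode(mirror::Object* obj, mirror::Class* ref_class) {
  // Compiled code already handled the null-object case on the fast path.
  DCHECK(obj != nullptr);
  DCHECK(ref_class != nullptr);
  // Returns 1 if obj is an instance of ref_class, 0 otherwise (size_t result).
  return obj->InstanceOf(ref_class) ? 1u : 0u;
}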
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 046c2d8b87..c6363d1708 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -489,14 +489,6 @@ class TypeCheckSlowPathARM : public SlowPathCodeARM {
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
-    Location arg0, arg1;
-    if (instruction_->IsInstanceOf()) {
-      arg0 = locations->InAt(1);
-      arg1 = locations->Out();
-    } else {
-      arg0 = locations->InAt(0);
-      arg1 = locations->InAt(1);
-    }
     DCHECK(instruction_->IsCheckCast()
            || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -510,10 +502,10 @@ class TypeCheckSlowPathARM : public SlowPathCodeARM {
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
     InvokeRuntimeCallingConvention calling_convention;
-    codegen->EmitParallelMoves(arg0,
+    codegen->EmitParallelMoves(locations->InAt(0),
                                Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                                Primitive::kPrimNot,
-                               arg1,
+                               locations->InAt(1),
                                Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
                                Primitive::kPrimNot);
     if (instruction_->IsInstanceOf()) {
@@ -521,7 +513,7 @@ class TypeCheckSlowPathARM : public SlowPathCodeARM {
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
-      CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*>();
+      CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Object*, mirror::Class*>();
       arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
     } else {
       DCHECK(instruction_->IsCheckCast());
@@ -6114,16 +6106,15 @@ void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
     __ CompareAndBranchIfZero(obj, &zero);
   }
-  // /* HeapReference<Class> */ out = obj->klass_
-  GenerateReferenceLoadTwoRegisters(instruction,
-                                    out_loc,
-                                    obj_loc,
-                                    class_offset,
-                                    maybe_temp_loc,
-                                    kCompilerReadBarrierOption);
-
   switch (type_check_kind) {
     case TypeCheckKind::kExactCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp_loc,
+                                        kCompilerReadBarrierOption);
       __ cmp(out, ShifterOperand(cls));
       // Classes must be equal for the instanceof to succeed.
       __ b(&zero, NE);
@@ -6133,6 +6124,13 @@ void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
     }
     case TypeCheckKind::kAbstractClassCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp_loc,
+                                        kCompilerReadBarrierOption);
       // If the class is abstract, we eagerly fetch the super class of the
       // object to avoid doing a comparison we know will fail.
       Label loop;
@@ -6155,6 +6153,13 @@ void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
     }
     case TypeCheckKind::kClassHierarchyCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp_loc,
+                                        kCompilerReadBarrierOption);
       // Walk over the class hierarchy to find a match.
       Label loop, success;
       __ Bind(&loop);
@@ -6178,6 +6183,13 @@ void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
     }
     case TypeCheckKind::kArrayObjectCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp_loc,
+                                        kCompilerReadBarrierOption);
       // Do an exact check.
       Label exact_check;
       __ cmp(out, ShifterOperand(cls));
@@ -6201,6 +6213,14 @@ void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
     }
     case TypeCheckKind::kArrayCheck: {
+      // No read barrier since the slow path will retry upon failure.
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp_loc,
+                                        kWithoutReadBarrier);
       __ cmp(out, ShifterOperand(cls));
       DCHECK(locations->OnlyCallsOnSlowPath());
       slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(instruction,
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index a085fea0f8..4ab6065819 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -459,14 +459,6 @@ class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
-    Location arg0, arg1;
-    if (instruction_->IsInstanceOf()) {
-      arg0 = locations->InAt(1);
-      arg1 = locations->Out();
-    } else {
-      arg0 = locations->InAt(0);
-      arg1 = locations->InAt(1);
-    }
     DCHECK(instruction_->IsCheckCast()
            || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -482,15 +474,15 @@ class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
     InvokeRuntimeCallingConvention calling_convention;
-    codegen->EmitParallelMoves(arg0,
+    codegen->EmitParallelMoves(locations->InAt(0),
                                LocationFrom(calling_convention.GetRegisterAt(0)),
                                Primitive::kPrimNot,
-                               arg1,
+                               locations->InAt(1),
                                LocationFrom(calling_convention.GetRegisterAt(1)),
                                Primitive::kPrimNot);
     if (instruction_->IsInstanceOf()) {
       arm64_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, dex_pc, this);
-      CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*>();
+      CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Object*, mirror::Class*>();
       Primitive::Type ret_type = instruction_->GetType();
       Location ret_loc = calling_convention.GetReturnLocation(ret_type);
       arm64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
@@ -3427,16 +3419,15 @@ void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
     __ Cbz(obj, &zero);
   }
-  // /* HeapReference<Class> */ out = obj->klass_
-  GenerateReferenceLoadTwoRegisters(instruction,
-                                    out_loc,
-                                    obj_loc,
-                                    class_offset,
-                                    maybe_temp_loc,
-                                    kCompilerReadBarrierOption);
-
   switch (type_check_kind) {
     case TypeCheckKind::kExactCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp_loc,
+                                        kCompilerReadBarrierOption);
       __ Cmp(out, cls);
       __ Cset(out, eq);
       if (zero.IsLinked()) {
@@ -3446,6 +3437,13 @@ void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
     }
     case TypeCheckKind::kAbstractClassCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp_loc,
+                                        kCompilerReadBarrierOption);
       // If the class is abstract, we eagerly fetch the super class of the
       // object to avoid doing a comparison we know will fail.
       vixl::aarch64::Label loop, success;
@@ -3468,6 +3466,13 @@ void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
     }
     case TypeCheckKind::kClassHierarchyCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp_loc,
+                                        kCompilerReadBarrierOption);
       // Walk over the class hierarchy to find a match.
       vixl::aarch64::Label loop, success;
       __ Bind(&loop);
@@ -3491,6 +3496,13 @@ void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
     }
     case TypeCheckKind::kArrayObjectCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp_loc,
+                                        kCompilerReadBarrierOption);
       // Do an exact check.
       vixl::aarch64::Label exact_check;
       __ Cmp(out, cls);
@@ -3514,6 +3526,14 @@ void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
     }
     case TypeCheckKind::kArrayCheck: {
+      // No read barrier since the slow path will retry upon failure.
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp_loc,
+                                        kWithoutReadBarrier);
       __ Cmp(out, cls);
       DCHECK(locations->OnlyCallsOnSlowPath());
       slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(instruction,
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index f7957d402f..e9827e8620 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -444,14 +444,6 @@ class TypeCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
-    Location arg0, arg1;
-    if (instruction_->IsInstanceOf()) {
-      arg0 = locations->InAt(1);
-      arg1 = locations->Out();
-    } else {
-      arg0 = locations->InAt(0);
-      arg1 = locations->InAt(1);
-    }
     DCHECK(instruction_->IsCheckCast()
            || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -466,10 +458,10 @@ class TypeCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
     // move resolver.
     InvokeRuntimeCallingConventionARMVIXL calling_convention;
-    codegen->EmitParallelMoves(arg0,
+    codegen->EmitParallelMoves(locations->InAt(0),
                                LocationFrom(calling_convention.GetRegisterAt(0)),
                                Primitive::kPrimNot,
-                               arg1,
+                               locations->InAt(1),
                                LocationFrom(calling_convention.GetRegisterAt(1)),
                                Primitive::kPrimNot);
     if (instruction_->IsInstanceOf()) {
@@ -477,8 +469,7 @@ class TypeCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
-      CheckEntrypointTypes<
-          kQuickInstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*>();
+      CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Object*, mirror::Class*>();
       arm_codegen->Move32(locations->Out(), LocationFrom(r0));
     } else {
       DCHECK(instruction_->IsCheckCast());
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index f169eb00f3..573bb507f2 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -378,14 +378,6 @@ class TypeCheckSlowPathMIPS : public SlowPathCodeMIPS {
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
-    Location arg0, arg1;
-    if (instruction_->IsInstanceOf()) {
-      arg0 = locations->InAt(1);
-      arg1 = locations->Out();
-    } else {
-      arg0 = locations->InAt(0);
-      arg1 = locations->InAt(1);
-    }
     uint32_t dex_pc = instruction_->GetDexPc();
     DCHECK(instruction_->IsCheckCast()
            || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -397,15 +389,15 @@ class TypeCheckSlowPathMIPS : public SlowPathCodeMIPS {
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
     InvokeRuntimeCallingConvention calling_convention;
-    codegen->EmitParallelMoves(arg0,
+    codegen->EmitParallelMoves(locations->InAt(0),
                                Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                                Primitive::kPrimNot,
-                               arg1,
+                               locations->InAt(1),
                                Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
                                Primitive::kPrimNot);
     if (instruction_->IsInstanceOf()) {
       mips_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, dex_pc, this);
-      CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*>();
+      CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Object*, mirror::Class*>();
       Primitive::Type ret_type = instruction_->GetType();
       Location ret_loc = calling_convention.GetReturnLocation(ret_type);
       mips_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 7598740d3c..1a54935a25 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -322,14 +322,6 @@ class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
-    Location arg0, arg1;
-    if (instruction_->IsInstanceOf()) {
-      arg0 = locations->InAt(1);
-      arg1 = locations->Out();
-    } else {
-      arg0 = locations->InAt(0);
-      arg1 = locations->InAt(1);
-    }
     uint32_t dex_pc = instruction_->GetDexPc();
     DCHECK(instruction_->IsCheckCast()
@@ -342,16 +334,15 @@ class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
     InvokeRuntimeCallingConvention calling_convention;
-    codegen->EmitParallelMoves(arg0,
+    codegen->EmitParallelMoves(locations->InAt(0),
                                Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                                Primitive::kPrimNot,
-                               arg1,
+                               locations->InAt(1),
                                Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
                                Primitive::kPrimNot);
     if (instruction_->IsInstanceOf()) {
       mips64_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, dex_pc, this);
-      CheckEntrypointTypes<
-          kQuickInstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*>();
+      CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Object*, mirror::Class*>();
       Primitive::Type ret_type = instruction_->GetType();
       Location ret_loc = calling_convention.GetReturnLocation(ret_type);
       mips64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 45edff8767..2451b8d247 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -312,14 +312,6 @@ class TypeCheckSlowPathX86 : public SlowPathCode {
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
-    Location arg0, arg1;
-    if (instruction_->IsInstanceOf()) {
-      arg0 = locations->InAt(1);
-      arg1 = locations->Out();
-    } else {
-      arg0 = locations->InAt(0);
-      arg1 = locations->InAt(1);
-    }
     DCHECK(instruction_->IsCheckCast()
            || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -333,10 +325,10 @@ class TypeCheckSlowPathX86 : public SlowPathCode {
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
     InvokeRuntimeCallingConvention calling_convention;
-    x86_codegen->EmitParallelMoves(arg0,
+    x86_codegen->EmitParallelMoves(locations->InAt(0),
                                    Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                                    Primitive::kPrimNot,
-                                   arg1,
+                                   locations->InAt(1),
                                    Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
                                    Primitive::kPrimNot);
     if (instruction_->IsInstanceOf()) {
@@ -344,7 +336,7 @@ class TypeCheckSlowPathX86 : public SlowPathCode {
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
-      CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*>();
+      CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Object*, mirror::Class*>();
     } else {
       DCHECK(instruction_->IsCheckCast());
       x86_codegen->InvokeRuntime(kQuickCheckInstanceOf,
@@ -6422,15 +6414,14 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
     __ j(kEqual, &zero);
   }
-  // /* HeapReference<Class> */ out = obj->klass_
-  GenerateReferenceLoadTwoRegisters(instruction,
-                                    out_loc,
-                                    obj_loc,
-                                    class_offset,
-                                    kCompilerReadBarrierOption);
-
   switch (type_check_kind) {
     case TypeCheckKind::kExactCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        kCompilerReadBarrierOption);
       if (cls.IsRegister()) {
         __ cmpl(out, cls.AsRegister<Register>());
       } else {
@@ -6446,6 +6437,12 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
     }
     case TypeCheckKind::kAbstractClassCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        kCompilerReadBarrierOption);
       // If the class is abstract, we eagerly fetch the super class of the
       // object to avoid doing a comparison we know will fail.
       NearLabel loop;
@@ -6474,6 +6471,12 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
     }
     case TypeCheckKind::kClassHierarchyCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        kCompilerReadBarrierOption);
       // Walk over the class hierarchy to find a match.
       NearLabel loop, success;
       __ Bind(&loop);
@@ -6503,6 +6506,12 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
     }
     case TypeCheckKind::kArrayObjectCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        kCompilerReadBarrierOption);
       // Do an exact check.
       NearLabel exact_check;
       if (cls.IsRegister()) {
@@ -6531,6 +6540,13 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
     }
     case TypeCheckKind::kArrayCheck: {
+      // No read barrier since the slow path will retry upon failure.
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        kWithoutReadBarrier);
       if (cls.IsRegister()) {
         __ cmpl(out, cls.AsRegister<Register>());
       } else {
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index f7a2f40f33..2425a4c3cb 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -332,14 +332,6 @@ class TypeCheckSlowPathX86_64 : public SlowPathCode {
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
-    Location arg0, arg1;
-    if (instruction_->IsInstanceOf()) {
-      arg0 = locations->InAt(1);
-      arg1 = locations->Out();
-    } else {
-      arg0 = locations->InAt(0);
-      arg1 = locations->InAt(1);
-    }
     uint32_t dex_pc = instruction_->GetDexPc();
     DCHECK(instruction_->IsCheckCast()
            || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -354,15 +346,15 @@ class TypeCheckSlowPathX86_64 : public SlowPathCode {
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
     InvokeRuntimeCallingConvention calling_convention;
-    codegen->EmitParallelMoves(arg0,
+    codegen->EmitParallelMoves(locations->InAt(0),
                                Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                                Primitive::kPrimNot,
-                               arg1,
+                               locations->InAt(1),
                                Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
                                Primitive::kPrimNot);
     if (instruction_->IsInstanceOf()) {
       x86_64_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, dex_pc, this);
-      CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*>();
+      CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Object*, mirror::Class*>();
     } else {
       DCHECK(instruction_->IsCheckCast());
       x86_64_codegen->InvokeRuntime(kQuickCheckInstanceOf, instruction_, dex_pc, this);
@@ -5826,15 +5818,14 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
     __ j(kEqual, &zero);
   }
-  // /* HeapReference<Class> */ out = obj->klass_
-  GenerateReferenceLoadTwoRegisters(instruction,
-                                    out_loc,
-                                    obj_loc,
-                                    class_offset,
-                                    kCompilerReadBarrierOption);
-
   switch (type_check_kind) {
     case TypeCheckKind::kExactCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        kCompilerReadBarrierOption);
       if (cls.IsRegister()) {
         __ cmpl(out, cls.AsRegister<CpuRegister>());
       } else {
@@ -5855,6 +5846,12 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
     }
     case TypeCheckKind::kAbstractClassCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        kCompilerReadBarrierOption);
       // If the class is abstract, we eagerly fetch the super class of the
       // object to avoid doing a comparison we know will fail.
       NearLabel loop, success;
@@ -5883,6 +5880,12 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
     }
     case TypeCheckKind::kClassHierarchyCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        kCompilerReadBarrierOption);
       // Walk over the class hierarchy to find a match.
       NearLabel loop, success;
       __ Bind(&loop);
@@ -5912,6 +5915,12 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
     }
     case TypeCheckKind::kArrayObjectCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        kCompilerReadBarrierOption);
       // Do an exact check.
       NearLabel exact_check;
       if (cls.IsRegister()) {
@@ -5940,6 +5949,13 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
     }
     case TypeCheckKind::kArrayCheck: {
+      // No read barrier since the slow path will retry upon failure.
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        kWithoutReadBarrier);
       if (cls.IsRegister()) {
         __ cmpl(out, cls.AsRegister<CpuRegister>());
       } else {
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index 5bd6b5633b..2a12f1d84a 100644
--- a/runtime/arch/arm/entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -30,7 +30,7 @@
 namespace art {
 // Cast entrypoints.
-extern "C" size_t artIsAssignableFromCode(mirror::Class* klass, mirror::Class* ref_class);
+extern "C" size_t artInstanceOfFromCode(mirror::Object* obj, mirror::Class* ref_class);
 // Read barrier entrypoints.
 // art_quick_read_barrier_mark_regX uses an non-standard calling
@@ -86,7 +86,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
   DefaultInitEntryPoints(jpoints, qpoints);
   // Cast
-  qpoints->pInstanceofNonTrivial = artIsAssignableFromCode;
+  qpoints->pInstanceofNonTrivial = artInstanceOfFromCode;;
   qpoints->pCheckInstanceOf = art_quick_check_instance_of;
   // Math
diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc
index e7c9fefece..6add107cb7 100644
--- a/runtime/arch/arm64/entrypoints_init_arm64.cc
+++ b/runtime/arch/arm64/entrypoints_init_arm64.cc
@@ -30,7 +30,7 @@
 namespace art {
 // Cast entrypoints.
-extern "C" size_t artIsAssignableFromCode(mirror::Class* klass, mirror::Class* ref_class);
+extern "C" size_t artInstanceOfFromCode(mirror::Object* obj, mirror::Class* ref_class);
 // Read barrier entrypoints.
 // art_quick_read_barrier_mark_regX uses an non-standard calling
@@ -115,7 +115,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
   DefaultInitEntryPoints(jpoints, qpoints);
   // Cast
-  qpoints->pInstanceofNonTrivial = artIsAssignableFromCode;
+  qpoints->pInstanceofNonTrivial = artInstanceOfFromCode;
   qpoints->pCheckInstanceOf = art_quick_check_instance_of;
   // Math
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index 6dca46fb07..cb0bdbfbee 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -30,7 +30,7 @@
 namespace art {
 // Cast entrypoints.
-extern "C" size_t artIsAssignableFromCode(mirror::Class* klass, mirror::Class* ref_class);
+extern "C" size_t artInstanceOfFromCode(mirror::Object* obj, mirror::Class* ref_class);
 // Math entrypoints.
 extern int32_t CmpgDouble(double a, double b);
@@ -74,7 +74,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
   ResetQuickAllocEntryPoints(qpoints);
   // Cast
-  qpoints->pInstanceofNonTrivial = artIsAssignableFromCode;
+  qpoints->pInstanceofNonTrivial = artInstanceOfFromCode;
   static_assert(IsDirectEntrypoint(kQuickInstanceofNonTrivial), "Direct C stub not marked direct.");
   qpoints->pCheckInstanceOf = art_quick_check_instance_of;
   static_assert(!IsDirectEntrypoint(kQuickCheckInstanceOf), "Non-direct C stub marked direct.");
diff --git a/runtime/arch/mips64/entrypoints_init_mips64.cc b/runtime/arch/mips64/entrypoints_init_mips64.cc
index 0e81906916..bc17d47366 100644
--- a/runtime/arch/mips64/entrypoints_init_mips64.cc
+++ b/runtime/arch/mips64/entrypoints_init_mips64.cc
@@ -30,7 +30,7 @@
 namespace art {
 // Cast entrypoints.
-extern "C" size_t artIsAssignableFromCode(mirror::Class* klass, mirror::Class* ref_class);
+extern "C" size_t artInstanceOfFromCode(mirror::Object* obj, mirror::Class* ref_class);
 // Math entrypoints.
 extern int32_t CmpgDouble(double a, double b);
@@ -67,7 +67,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
   DefaultInitEntryPoints(jpoints, qpoints);
   // Cast
-  qpoints->pInstanceofNonTrivial = artIsAssignableFromCode;
+  qpoints->pInstanceofNonTrivial = artInstanceOfFromCode;
   qpoints->pCheckInstanceOf = art_quick_check_instance_of;
   // Math
diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc
index 94fea69beb..9cd4a3ee3b 100644
--- a/runtime/arch/x86/entrypoints_init_x86.cc
+++ b/runtime/arch/x86/entrypoints_init_x86.cc
@@ -27,7 +27,7 @@
 namespace art {
 // Cast entrypoints.
-extern "C" size_t art_quick_is_assignable(mirror::Class* klass, mirror::Class* ref_class);
+extern "C" size_t art_quick_instance_of(mirror::Object* obj, mirror::Class* ref_class);
 // Read barrier entrypoints.
 // art_quick_read_barrier_mark_regX uses an non-standard calling
@@ -58,7 +58,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
   DefaultInitEntryPoints(jpoints, qpoints);
   // Cast
-  qpoints->pInstanceofNonTrivial = art_quick_is_assignable;
+  qpoints->pInstanceofNonTrivial = art_quick_instance_of;
   qpoints->pCheckInstanceOf = art_quick_check_instance_of;
   // More math.
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 761a510bfe..fb405fac0f 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1351,15 +1351,15 @@ DEFINE_FUNCTION art_quick_unlock_object_no_inline
     RETURN_IF_EAX_ZERO
 END_FUNCTION art_quick_unlock_object_no_inline
-DEFINE_FUNCTION art_quick_is_assignable
+DEFINE_FUNCTION art_quick_instance_of
     PUSH eax                              // alignment padding
     PUSH ecx                              // pass arg2 - obj->klass
     PUSH eax                              // pass arg1 - checked class
-    call SYMBOL(artIsAssignableFromCode)  // (Class* klass, Class* ref_klass)
+    call SYMBOL(artInstanceOfFromCode)    // (Object* obj, Class* ref_klass)
     addl LITERAL(12), %esp                // pop arguments
     CFI_ADJUST_CFA_OFFSET(-12)
     ret
-END_FUNCTION art_quick_is_assignable
+END_FUNCTION art_quick_instance_of
 DEFINE_FUNCTION art_quick_check_instance_of
     PUSH eax                              // alignment padding
diff --git a/runtime/arch/x86_64/entrypoints_init_x86_64.cc b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
index 6b66e62bf4..a326b4eebc 100644
--- a/runtime/arch/x86_64/entrypoints_init_x86_64.cc
+++ b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
@@ -30,7 +30,7 @@
 namespace art {
 // Cast entrypoints.
-extern "C" size_t art_quick_assignable_from_code(mirror::Class* klass, mirror::Class* ref_class); +extern "C" size_t art_quick_instance_of(mirror::Object* obj, mirror::Class* ref_class); // Read barrier entrypoints. // art_quick_read_barrier_mark_regX uses an non-standard calling @@ -81,7 +81,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { DefaultInitEntryPoints(jpoints, qpoints); // Cast - qpoints->pInstanceofNonTrivial = art_quick_assignable_from_code; + qpoints->pInstanceofNonTrivial = art_quick_instance_of; qpoints->pCheckInstanceOf = art_quick_check_instance_of; // More math. diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S index 20ee3f5b3a..860b77efe3 100644 --- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S +++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S @@ -2223,16 +2223,16 @@ END_FUNCTION art_quick_string_compareto UNIMPLEMENTED art_quick_memcmp16 -DEFINE_FUNCTION art_quick_assignable_from_code +DEFINE_FUNCTION art_quick_instance_of SETUP_FP_CALLEE_SAVE_FRAME subq LITERAL(8), %rsp // Alignment padding. CFI_ADJUST_CFA_OFFSET(8) - call SYMBOL(artIsAssignableFromCode) // (const mirror::Class*, const mirror::Class*) + call SYMBOL(artInstanceOfFromCode) // (mirror::Object*, mirror::Class*) addq LITERAL(8), %rsp CFI_ADJUST_CFA_OFFSET(-8) RESTORE_FP_CALLEE_SAVE_FRAME ret -END_FUNCTION art_quick_assignable_from_code +END_FUNCTION art_quick_instance_of // Return from a nested signal: diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h index dd8fe55420..a1c5082c93 100644 --- a/runtime/entrypoints/quick/quick_entrypoints_list.h +++ b/runtime/entrypoints/quick/quick_entrypoints_list.h @@ -33,7 +33,7 @@ V(AllocStringFromChars, void*, int32_t, int32_t, void*) \ V(AllocStringFromString, void*, void*) \ \ - V(InstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*) \ + V(InstanceofNonTrivial, size_t, mirror::Object*, mirror::Class*) \ V(CheckInstanceOf, void, mirror::Object*, mirror::Class*) \ \ V(InitializeStaticStorage, void*, uint32_t) \ diff --git a/runtime/oat.h b/runtime/oat.h index 814a4934e7..3aef707dc3 100644 --- a/runtime/oat.h +++ b/runtime/oat.h @@ -32,7 +32,7 @@ class InstructionSetFeatures; class PACKED(4) OatHeader { public: static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' }; - static constexpr uint8_t kOatVersion[] = { '0', '9', '0', '\0' }; + static constexpr uint8_t kOatVersion[] = { '0', '9', '1', '\0' }; static constexpr const char* kImageLocationKey = "image-location"; static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline"; |