author Mathieu Chartier <mathieuc@google.com> 2016-11-08 23:14:22 +0000
committer Gerrit Code Review <noreply-gerritcodereview@google.com> 2016-11-08 23:14:22 +0000
commit da40ac056c8f8913cc1168cf0a0215ec992cd60c (patch)
tree d437a440c08c718ffc8f52c536e88e8cbaf9d719
parent 8bab69aafb3039f1dfe4aad4faf2a99986018abb (diff)
parent b99f4d6463e7cb5654af3893ed7b3113665df658 (diff)
Merge "Change check cast entrypoint to check instance of"
-rw-r--r--  compiler/optimizing/code_generator_arm.cc  107
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc  92
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc  30
-rw-r--r--  compiler/optimizing/code_generator_mips.cc  21
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc  21
-rw-r--r--  compiler/optimizing/code_generator_x86.cc  87
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc  99
-rw-r--r--  runtime/arch/arm/entrypoints_init_arm.cc  5
-rw-r--r--  runtime/arch/arm/quick_entrypoints_arm.S  15
-rw-r--r--  runtime/arch/arm64/entrypoints_init_arm64.cc  5
-rw-r--r--  runtime/arch/arm64/quick_entrypoints_arm64.S  15
-rw-r--r--  runtime/arch/mips/entrypoints_init_mips.cc  7
-rw-r--r--  runtime/arch/mips/quick_entrypoints_mips.S  15
-rw-r--r--  runtime/arch/mips64/entrypoints_init_mips64.cc  6
-rw-r--r--  runtime/arch/mips64/quick_entrypoints_mips64.S  15
-rw-r--r--  runtime/arch/stub_test.cc  84
-rw-r--r--  runtime/arch/x86/entrypoints_init_x86.cc  5
-rw-r--r--  runtime/arch/x86/quick_entrypoints_x86.S  12
-rw-r--r--  runtime/arch/x86_64/entrypoints_init_x86_64.cc  5
-rw-r--r--  runtime/arch/x86_64/quick_entrypoints_x86_64.S  8
-rw-r--r--  runtime/entrypoints/quick/quick_cast_entrypoints.cc  8
-rw-r--r--  runtime/entrypoints/quick/quick_default_externs.h  2
-rw-r--r--  runtime/entrypoints/quick/quick_entrypoints_list.h  4
-rw-r--r--  runtime/entrypoints/quick/quick_throw_entrypoints.cc  8
-rw-r--r--  runtime/entrypoints_order_test.cc  5
-rw-r--r--  runtime/thread.cc  2
26 files changed, 298 insertions, 385 deletions
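
This merge replaces the pCheckCast quick entrypoint, which took the object's class and the target class, with a pCheckInstanceOf entrypoint that takes the object itself and the target class, so the type-check slow paths no longer have to reload obj->klass_ before calling into the runtime. For orientation, the new runtime pieces look roughly like this (a condensed sketch of the signatures and bodies that appear in the hunks below, not a complete listing):

// Quick entrypoint list: CheckCast becomes CheckInstanceOf.
//   V(CheckInstanceOf, void, mirror::Object*, mirror::Class*)

// Non-throwing instance-of helper added to quick_cast_entrypoints.cc.
extern "C" size_t artInstanceOfFromCode(mirror::Object* obj, mirror::Class* ref_class)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  // Null and trivial equality checks are already done by the compiled code.
  return obj->InstanceOf(ref_class) ? 1 : 0;
}

// Throw helper added to quick_throw_entrypoints.cc: takes the failing object,
// derives its class, and reuses the existing ClassCastException path.
extern "C" NO_RETURN void artThrowClassCastExceptionForObject(mirror::Object* obj,
                                                              mirror::Class* dest_type,
                                                              Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  artThrowClassCastException(dest_type, obj->GetClass(), self);
}
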
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 7cab97d2e5..08d22f84d0 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -489,8 +489,14 @@ class TypeCheckSlowPathARM : public SlowPathCodeARM {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
- Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
- : locations->Out();
+ Location arg0, arg1;
+ if (instruction_->IsInstanceOf()) {
+ arg0 = locations->InAt(1);
+ arg1 = locations->Out();
+ } else {
+ arg0 = locations->InAt(0);
+ arg1 = locations->InAt(1);
+ }
DCHECK(instruction_->IsCheckCast()
|| !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -504,26 +510,26 @@ class TypeCheckSlowPathARM : public SlowPathCodeARM {
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
- codegen->EmitParallelMoves(
- locations->InAt(1),
- Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
- Primitive::kPrimNot,
- object_class,
- Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
- Primitive::kPrimNot);
-
+ codegen->EmitParallelMoves(arg0,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ Primitive::kPrimNot,
+ arg1,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+ Primitive::kPrimNot);
if (instruction_->IsInstanceOf()) {
arm_codegen->InvokeRuntime(kQuickInstanceofNonTrivial,
instruction_,
instruction_->GetDexPc(),
this);
- CheckEntrypointTypes<
- kQuickInstanceofNonTrivial, size_t, const mirror::Class*, const mirror::Class*>();
+ CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*>();
arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
} else {
DCHECK(instruction_->IsCheckCast());
- arm_codegen->InvokeRuntime(kQuickCheckCast, instruction_, instruction_->GetDexPc(), this);
- CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
+ arm_codegen->InvokeRuntime(kQuickCheckInstanceOf,
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
+ CheckEntrypointTypes<kQuickCheckInstanceOf, void, mirror::Object*, mirror::Class*>();
}
if (!is_fatal_) {
@@ -6297,26 +6303,16 @@ void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) {
case TypeCheckKind::kAbstractClassCheck: {
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
- Label loop, compare_classes;
+ Label loop;
__ Bind(&loop);
// /* HeapReference<Class> */ temp = temp->super_class_
GenerateReferenceLoadOneRegister(instruction, temp_loc, super_offset, maybe_temp2_loc);
- // If the class reference currently in `temp` is not null, jump
- // to the `compare_classes` label to compare it with the checked
- // class.
- __ CompareAndBranchIfNonZero(temp, &compare_classes);
- // Otherwise, jump to the slow path to throw the exception.
- //
- // But before, move back the object's class into `temp` before
- // going into the slow path, as it has been overwritten in the
- // meantime.
- // /* HeapReference<Class> */ temp = obj->klass_
- GenerateReferenceLoadTwoRegisters(
- instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc);
- __ b(type_check_slow_path->GetEntryLabel());
+ // If the class reference currently in `temp` is null, jump to the slow path to throw the
+ // exception.
+ __ CompareAndBranchIfZero(temp, type_check_slow_path->GetEntryLabel());
- __ Bind(&compare_classes);
+ // Otherwise, compare the classes.
__ cmp(temp, ShifterOperand(cls));
__ b(&loop, NE);
break;
@@ -6332,55 +6328,29 @@ void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) {
// /* HeapReference<Class> */ temp = temp->super_class_
GenerateReferenceLoadOneRegister(instruction, temp_loc, super_offset, maybe_temp2_loc);
- // If the class reference currently in `temp` is not null, jump
- // back at the beginning of the loop.
- __ CompareAndBranchIfNonZero(temp, &loop);
- // Otherwise, jump to the slow path to throw the exception.
- //
- // But before, move back the object's class into `temp` before
- // going into the slow path, as it has been overwritten in the
- // meantime.
- // /* HeapReference<Class> */ temp = obj->klass_
- GenerateReferenceLoadTwoRegisters(
- instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc);
- __ b(type_check_slow_path->GetEntryLabel());
+ // If the class reference currently in `temp` is null, jump to the slow path to throw the
+ // exception.
+ __ CompareAndBranchIfZero(temp, type_check_slow_path->GetEntryLabel());
+ // Otherwise, jump to the beginning of the loop.
+ __ b(&loop);
break;
}
case TypeCheckKind::kArrayObjectCheck: {
// Do an exact check.
- Label check_non_primitive_component_type;
__ cmp(temp, ShifterOperand(cls));
__ b(&done, EQ);
// Otherwise, we need to check that the object's class is a non-primitive array.
// /* HeapReference<Class> */ temp = temp->component_type_
GenerateReferenceLoadOneRegister(instruction, temp_loc, component_offset, maybe_temp2_loc);
-
- // If the component type is not null (i.e. the object is indeed
- // an array), jump to label `check_non_primitive_component_type`
- // to further check that this component type is not a primitive
- // type.
- __ CompareAndBranchIfNonZero(temp, &check_non_primitive_component_type);
- // Otherwise, jump to the slow path to throw the exception.
- //
- // But before, move back the object's class into `temp` before
- // going into the slow path, as it has been overwritten in the
- // meantime.
- // /* HeapReference<Class> */ temp = obj->klass_
- GenerateReferenceLoadTwoRegisters(
- instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc);
- __ b(type_check_slow_path->GetEntryLabel());
-
- __ Bind(&check_non_primitive_component_type);
+ // If the component type is null, jump to the slow path to throw the exception.
+ __ CompareAndBranchIfZero(temp, type_check_slow_path->GetEntryLabel());
+ // Otherwise, the object is indeed an array. Further check that this component type is not
+ // a primitive type.
__ LoadFromOffset(kLoadUnsignedHalfword, temp, temp, primitive_offset);
static_assert(Primitive::kPrimNot == 0, "Expected 0 for art::Primitive::kPrimNot");
- __ CompareAndBranchIfZero(temp, &done);
- // Same comment as above regarding `temp` and the slow path.
- // /* HeapReference<Class> */ temp = obj->klass_
- GenerateReferenceLoadTwoRegisters(
- instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc);
- __ b(type_check_slow_path->GetEntryLabel());
+ __ CompareAndBranchIfNonZero(temp, type_check_slow_path->GetEntryLabel());
break;
}
@@ -6396,13 +6366,6 @@ void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) {
// instruction (following the runtime calling convention), which
// might be cluttered by the potential first read barrier
// emission at the beginning of this method.
- //
- // TODO: Introduce a new runtime entry point taking the object
- // to test (instead of its class) as argument, and let it deal
- // with the read barrier issues. This will let us refactor this
- // case of the `switch` code as it was previously (with a direct
- // call to the runtime not using a type checking slow path).
- // This should also be beneficial for the other cases above.
__ b(type_check_slow_path->GetEntryLabel());
break;
}
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index d868984387..f5119df4e9 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -459,9 +459,15 @@ class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
- Location class_to_check = locations->InAt(1);
- Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
- : locations->Out();
+ Location arg0, arg1;
+ if (instruction_->IsInstanceOf()) {
+ arg0 = locations->InAt(1);
+ arg1 = locations->Out();
+ } else {
+ arg0 = locations->InAt(0);
+ arg1 = locations->InAt(1);
+ }
+
DCHECK(instruction_->IsCheckCast()
|| !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
@@ -476,21 +482,22 @@ class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
- codegen->EmitParallelMoves(
- class_to_check, LocationFrom(calling_convention.GetRegisterAt(0)), Primitive::kPrimNot,
- object_class, LocationFrom(calling_convention.GetRegisterAt(1)), Primitive::kPrimNot);
-
+ codegen->EmitParallelMoves(arg0,
+ LocationFrom(calling_convention.GetRegisterAt(0)),
+ Primitive::kPrimNot,
+ arg1,
+ LocationFrom(calling_convention.GetRegisterAt(1)),
+ Primitive::kPrimNot);
if (instruction_->IsInstanceOf()) {
arm64_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, dex_pc, this);
- CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t,
- const mirror::Class*, const mirror::Class*>();
+ CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*>();
Primitive::Type ret_type = instruction_->GetType();
Location ret_loc = calling_convention.GetReturnLocation(ret_type);
arm64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
} else {
DCHECK(instruction_->IsCheckCast());
- arm64_codegen->InvokeRuntime(kQuickCheckCast, instruction_, dex_pc, this);
- CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
+ arm64_codegen->InvokeRuntime(kQuickCheckInstanceOf, instruction_, dex_pc, this);
+ CheckEntrypointTypes<kQuickCheckInstanceOf, void, mirror::Object*, mirror::Class*>();
}
if (!is_fatal_) {
@@ -3594,26 +3601,15 @@ void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) {
case TypeCheckKind::kAbstractClassCheck: {
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
- vixl::aarch64::Label loop, compare_classes;
+ vixl::aarch64::Label loop;
__ Bind(&loop);
// /* HeapReference<Class> */ temp = temp->super_class_
GenerateReferenceLoadOneRegister(instruction, temp_loc, super_offset, maybe_temp2_loc);
- // If the class reference currently in `temp` is not null, jump
- // to the `compare_classes` label to compare it with the checked
- // class.
- __ Cbnz(temp, &compare_classes);
- // Otherwise, jump to the slow path to throw the exception.
- //
- // But before, move back the object's class into `temp` before
- // going into the slow path, as it has been overwritten in the
- // meantime.
- // /* HeapReference<Class> */ temp = obj->klass_
- GenerateReferenceLoadTwoRegisters(
- instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc);
- __ B(type_check_slow_path->GetEntryLabel());
-
- __ Bind(&compare_classes);
+ // If the class reference currently in `temp` is null, jump to the slow path to throw the
+ // exception.
+ __ Cbz(temp, type_check_slow_path->GetEntryLabel());
+ // Otherwise, compare classes.
__ Cmp(temp, cls);
__ B(ne, &loop);
break;
@@ -3633,20 +3629,12 @@ void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) {
// back at the beginning of the loop.
__ Cbnz(temp, &loop);
// Otherwise, jump to the slow path to throw the exception.
- //
- // But before, move back the object's class into `temp` before
- // going into the slow path, as it has been overwritten in the
- // meantime.
- // /* HeapReference<Class> */ temp = obj->klass_
- GenerateReferenceLoadTwoRegisters(
- instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc);
__ B(type_check_slow_path->GetEntryLabel());
break;
}
case TypeCheckKind::kArrayObjectCheck: {
// Do an exact check.
- vixl::aarch64::Label check_non_primitive_component_type;
__ Cmp(temp, cls);
__ B(eq, &done);
@@ -3654,30 +3642,13 @@ void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) {
// /* HeapReference<Class> */ temp = temp->component_type_
GenerateReferenceLoadOneRegister(instruction, temp_loc, component_offset, maybe_temp2_loc);
- // If the component type is not null (i.e. the object is indeed
- // an array), jump to label `check_non_primitive_component_type`
- // to further check that this component type is not a primitive
- // type.
- __ Cbnz(temp, &check_non_primitive_component_type);
- // Otherwise, jump to the slow path to throw the exception.
- //
- // But before, move back the object's class into `temp` before
- // going into the slow path, as it has been overwritten in the
- // meantime.
- // /* HeapReference<Class> */ temp = obj->klass_
- GenerateReferenceLoadTwoRegisters(
- instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc);
- __ B(type_check_slow_path->GetEntryLabel());
-
- __ Bind(&check_non_primitive_component_type);
+ // If the component type is null, jump to the slow path to throw the exception.
+ __ Cbz(temp, type_check_slow_path->GetEntryLabel());
+ // Otherwise, the object is indeed an array. Further check that this component type is not a
+ // primitive type.
__ Ldrh(temp, HeapOperand(temp, primitive_offset));
static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
- __ Cbz(temp, &done);
- // Same comment as above regarding `temp` and the slow path.
- // /* HeapReference<Class> */ temp = obj->klass_
- GenerateReferenceLoadTwoRegisters(
- instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc);
- __ B(type_check_slow_path->GetEntryLabel());
+ __ Cbnz(temp, type_check_slow_path->GetEntryLabel());
break;
}
@@ -3693,13 +3664,6 @@ void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) {
// instruction (following the runtime calling convention), which
// might be cluttered by the potential first read barrier
// emission at the beginning of this method.
- //
- // TODO: Introduce a new runtime entry point taking the object
- // to test (instead of its class) as argument, and let it deal
- // with the read barrier issues. This will let us refactor this
- // case of the `switch` code as it was previously (with a direct
- // call to the runtime not using a type checking slow path).
- // This should also be beneficial for the other cases above.
__ B(type_check_slow_path->GetEntryLabel());
break;
}
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index e69528e43f..87f6c89ac3 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -443,8 +443,14 @@ class TypeCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
- Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
- : locations->Out();
+ Location arg0, arg1;
+ if (instruction_->IsInstanceOf()) {
+ arg0 = locations->InAt(1);
+ arg1 = locations->Out();
+ } else {
+ arg0 = locations->InAt(0);
+ arg1 = locations->InAt(1);
+ }
DCHECK(instruction_->IsCheckCast()
|| !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -458,20 +464,22 @@ class TypeCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConventionARMVIXL calling_convention;
- codegen->EmitParallelMoves(
- locations->InAt(1),
- LocationFrom(calling_convention.GetRegisterAt(0)),
- Primitive::kPrimNot,
- object_class,
- LocationFrom(calling_convention.GetRegisterAt(1)),
- Primitive::kPrimNot);
+ codegen->EmitParallelMoves(arg0,
+ LocationFrom(calling_convention.GetRegisterAt(0)),
+ Primitive::kPrimNot,
+ arg1,
+ LocationFrom(calling_convention.GetRegisterAt(1)),
+ Primitive::kPrimNot);
if (instruction_->IsInstanceOf()) {
TODO_VIXL32(FATAL);
} else {
DCHECK(instruction_->IsCheckCast());
- arm_codegen->InvokeRuntime(kQuickCheckCast, instruction_, instruction_->GetDexPc(), this);
- CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
+ arm_codegen->InvokeRuntime(kQuickCheckInstanceOf,
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
+ CheckEntrypointTypes<kQuickCheckInstanceOf, void, mirror::Object*, mirror::Class*>();
}
if (!is_fatal_) {
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 12b1ab9abb..6e9fbd2560 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -378,7 +378,14 @@ class TypeCheckSlowPathMIPS : public SlowPathCodeMIPS {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
- Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0) : locations->Out();
+ Location arg0, arg1;
+ if (instruction_->IsInstanceOf()) {
+ arg0 = locations->InAt(1);
+ arg1 = locations->Out();
+ } else {
+ arg0 = locations->InAt(0);
+ arg1 = locations->InAt(1);
+ }
uint32_t dex_pc = instruction_->GetDexPc();
DCHECK(instruction_->IsCheckCast()
|| !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -390,24 +397,22 @@ class TypeCheckSlowPathMIPS : public SlowPathCodeMIPS {
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
- codegen->EmitParallelMoves(locations->InAt(1),
+ codegen->EmitParallelMoves(arg0,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
Primitive::kPrimNot,
- object_class,
+ arg1,
Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
Primitive::kPrimNot);
-
if (instruction_->IsInstanceOf()) {
mips_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, dex_pc, this);
- CheckEntrypointTypes<
- kQuickInstanceofNonTrivial, size_t, const mirror::Class*, const mirror::Class*>();
+ CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*>();
Primitive::Type ret_type = instruction_->GetType();
Location ret_loc = calling_convention.GetReturnLocation(ret_type);
mips_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
} else {
DCHECK(instruction_->IsCheckCast());
- mips_codegen->InvokeRuntime(kQuickCheckCast, instruction_, dex_pc, this);
- CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
+ mips_codegen->InvokeRuntime(kQuickCheckInstanceOf, instruction_, dex_pc, this);
+ CheckEntrypointTypes<kQuickCheckInstanceOf, void, mirror::Object*, mirror::Class*>();
}
RestoreLiveRegisters(codegen, locations);
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 010bf24232..7598740d3c 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -322,7 +322,15 @@ class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
- Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0) : locations->Out();
+ Location arg0, arg1;
+ if (instruction_->IsInstanceOf()) {
+ arg0 = locations->InAt(1);
+ arg1 = locations->Out();
+ } else {
+ arg0 = locations->InAt(0);
+ arg1 = locations->InAt(1);
+ }
+
uint32_t dex_pc = instruction_->GetDexPc();
DCHECK(instruction_->IsCheckCast()
|| !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -334,24 +342,23 @@ class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
- codegen->EmitParallelMoves(locations->InAt(1),
+ codegen->EmitParallelMoves(arg0,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
Primitive::kPrimNot,
- object_class,
+ arg1,
Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
Primitive::kPrimNot);
-
if (instruction_->IsInstanceOf()) {
mips64_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, dex_pc, this);
CheckEntrypointTypes<
- kQuickInstanceofNonTrivial, size_t, const mirror::Class*, const mirror::Class*>();
+ kQuickInstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*>();
Primitive::Type ret_type = instruction_->GetType();
Location ret_loc = calling_convention.GetReturnLocation(ret_type);
mips64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
} else {
DCHECK(instruction_->IsCheckCast());
- mips64_codegen->InvokeRuntime(kQuickCheckCast, instruction_, dex_pc, this);
- CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
+ mips64_codegen->InvokeRuntime(kQuickCheckInstanceOf, instruction_, dex_pc, this);
+ CheckEntrypointTypes<kQuickCheckInstanceOf, void, mirror::Object*, mirror::Class*>();
}
RestoreLiveRegisters(codegen, locations);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 2f946e4263..c90227930c 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -312,8 +312,14 @@ class TypeCheckSlowPathX86 : public SlowPathCode {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
- Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
- : locations->Out();
+ Location arg0, arg1;
+ if (instruction_->IsInstanceOf()) {
+ arg0 = locations->InAt(1);
+ arg1 = locations->Out();
+ } else {
+ arg0 = locations->InAt(0);
+ arg1 = locations->InAt(1);
+ }
DCHECK(instruction_->IsCheckCast()
|| !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -327,25 +333,25 @@ class TypeCheckSlowPathX86 : public SlowPathCode {
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
- x86_codegen->EmitParallelMoves(
- locations->InAt(1),
- Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
- Primitive::kPrimNot,
- object_class,
- Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
- Primitive::kPrimNot);
-
+ x86_codegen->EmitParallelMoves(arg0,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ Primitive::kPrimNot,
+ arg1,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+ Primitive::kPrimNot);
if (instruction_->IsInstanceOf()) {
x86_codegen->InvokeRuntime(kQuickInstanceofNonTrivial,
instruction_,
instruction_->GetDexPc(),
this);
- CheckEntrypointTypes<
- kQuickInstanceofNonTrivial, size_t, const mirror::Class*, const mirror::Class*>();
+ CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*>();
} else {
DCHECK(instruction_->IsCheckCast());
- x86_codegen->InvokeRuntime(kQuickCheckCast, instruction_, instruction_->GetDexPc(), this);
- CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
+ x86_codegen->InvokeRuntime(kQuickCheckInstanceOf,
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
+ CheckEntrypointTypes<kQuickCheckInstanceOf, void, mirror::Object*, mirror::Class*>();
}
if (!is_fatal_) {
@@ -6645,26 +6651,17 @@ void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) {
case TypeCheckKind::kAbstractClassCheck: {
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
- NearLabel loop, compare_classes;
+ NearLabel loop;
__ Bind(&loop);
// /* HeapReference<Class> */ temp = temp->super_class_
GenerateReferenceLoadOneRegister(instruction, temp_loc, super_offset, maybe_temp2_loc);
- // If the class reference currently in `temp` is not null, jump
- // to the `compare_classes` label to compare it with the checked
- // class.
+ // If the class reference currently in `temp` is null, jump to the slow path to throw the
+ // exception.
__ testl(temp, temp);
- __ j(kNotEqual, &compare_classes);
- // Otherwise, jump to the slow path to throw the exception.
- //
- // But before, move back the object's class into `temp` before
- // going into the slow path, as it has been overwritten in the
- // meantime.
- // /* HeapReference<Class> */ temp = obj->klass_
- GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset);
- __ jmp(type_check_slow_path->GetEntryLabel());
+ __ j(kZero, type_check_slow_path->GetEntryLabel());
- __ Bind(&compare_classes);
+ // Otherwise, compare the classes.
if (cls.IsRegister()) {
__ cmpl(temp, cls.AsRegister<Register>());
} else {
@@ -6693,21 +6690,14 @@ void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) {
// If the class reference currently in `temp` is not null, jump
// back at the beginning of the loop.
__ testl(temp, temp);
- __ j(kNotEqual, &loop);
- // Otherwise, jump to the slow path to throw the exception.
- //
- // But before, move back the object's class into `temp` before
- // going into the slow path, as it has been overwritten in the
- // meantime.
- // /* HeapReference<Class> */ temp = obj->klass_
- GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset);
+ __ j(kNotZero, &loop);
+ // Otherwise, jump to the slow path to throw the exception.
__ jmp(type_check_slow_path->GetEntryLabel());
break;
}
case TypeCheckKind::kArrayObjectCheck: {
// Do an exact check.
- NearLabel check_non_primitive_component_type;
if (cls.IsRegister()) {
__ cmpl(temp, cls.AsRegister<Register>());
} else {
@@ -6720,28 +6710,13 @@ void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) {
// /* HeapReference<Class> */ temp = temp->component_type_
GenerateReferenceLoadOneRegister(instruction, temp_loc, component_offset, maybe_temp2_loc);
- // If the component type is not null (i.e. the object is indeed
- // an array), jump to label `check_non_primitive_component_type`
- // to further check that this component type is not a primitive
- // type.
+ // If the component type is null (i.e. the object is not an array), jump to the slow path to
+ // throw the exception. Otherwise proceed with the check.
__ testl(temp, temp);
- __ j(kNotEqual, &check_non_primitive_component_type);
- // Otherwise, jump to the slow path to throw the exception.
- //
- // But before, move back the object's class into `temp` before
- // going into the slow path, as it has been overwritten in the
- // meantime.
- // /* HeapReference<Class> */ temp = obj->klass_
- GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset);
- __ jmp(type_check_slow_path->GetEntryLabel());
+ __ j(kZero, type_check_slow_path->GetEntryLabel());
- __ Bind(&check_non_primitive_component_type);
__ cmpw(Address(temp, primitive_offset), Immediate(Primitive::kPrimNot));
- __ j(kEqual, &done);
- // Same comment as above regarding `temp` and the slow path.
- // /* HeapReference<Class> */ temp = obj->klass_
- GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset);
- __ jmp(type_check_slow_path->GetEntryLabel());
+ __ j(kNotEqual, type_check_slow_path->GetEntryLabel());
break;
}
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 232c3b3cbb..89b16d3f77 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -332,8 +332,14 @@ class TypeCheckSlowPathX86_64 : public SlowPathCode {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
- Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
- : locations->Out();
+ Location arg0, arg1;
+ if (instruction_->IsInstanceOf()) {
+ arg0 = locations->InAt(1);
+ arg1 = locations->Out();
+ } else {
+ arg0 = locations->InAt(0);
+ arg1 = locations->InAt(1);
+ }
uint32_t dex_pc = instruction_->GetDexPc();
DCHECK(instruction_->IsCheckCast()
|| !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -348,22 +354,19 @@ class TypeCheckSlowPathX86_64 : public SlowPathCode {
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
- codegen->EmitParallelMoves(
- locations->InAt(1),
- Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
- Primitive::kPrimNot,
- object_class,
- Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
- Primitive::kPrimNot);
-
+ codegen->EmitParallelMoves(arg0,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ Primitive::kPrimNot,
+ arg1,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+ Primitive::kPrimNot);
if (instruction_->IsInstanceOf()) {
x86_64_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, dex_pc, this);
- CheckEntrypointTypes<
- kQuickInstanceofNonTrivial, size_t, const mirror::Class*, const mirror::Class*>();
+ CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*>();
} else {
DCHECK(instruction_->IsCheckCast());
- x86_64_codegen->InvokeRuntime(kQuickCheckCast, instruction_, dex_pc, this);
- CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
+ x86_64_codegen->InvokeRuntime(kQuickCheckInstanceOf, instruction_, dex_pc, this);
+ CheckEntrypointTypes<kQuickCheckInstanceOf, void, mirror::Object*, mirror::Class*>();
}
if (!is_fatal_) {
@@ -6100,30 +6103,16 @@ void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) {
kEmitCompilerReadBarrier);
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
- NearLabel loop, compare_classes;
+ NearLabel loop;
__ Bind(&loop);
// /* HeapReference<Class> */ temp = temp->super_class_
GenerateReferenceLoadOneRegister(instruction, temp_loc, super_offset, maybe_temp2_loc);
- // If the class reference currently in `temp` is not null, jump
- // to the `compare_classes` label to compare it with the checked
- // class.
+ // If the class reference currently in `temp` is null, jump to the slow path to throw the
+ // exception.
__ testl(temp, temp);
- __ j(kNotEqual, &compare_classes);
- // Otherwise, jump to the slow path to throw the exception.
- //
- // But before, move back the object's class into `temp` before
- // going into the slow path, as it has been overwritten in the
- // meantime.
- // /* HeapReference<Class> */ temp = obj->klass_
- GenerateReferenceLoadTwoRegisters(instruction,
- temp_loc,
- obj_loc,
- class_offset,
- kEmitCompilerReadBarrier);
- __ jmp(type_check_slow_path->GetEntryLabel());
-
- __ Bind(&compare_classes);
+ __ j(kZero, type_check_slow_path->GetEntryLabel());
+ // Otherwise, compare the classes.
if (cls.IsRegister()) {
__ cmpl(temp, cls.AsRegister<CpuRegister>());
} else {
@@ -6166,18 +6155,8 @@ void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) {
// If the class reference currently in `temp` is not null, jump
// back at the beginning of the loop.
__ testl(temp, temp);
- __ j(kNotEqual, &loop);
+ __ j(kNotZero, &loop);
// Otherwise, jump to the slow path to throw the exception.
- //
- // But before, move back the object's class into `temp` before
- // going into the slow path, as it has been overwritten in the
- // meantime.
- // /* HeapReference<Class> */ temp = obj->klass_
- GenerateReferenceLoadTwoRegisters(instruction,
- temp_loc,
- obj_loc,
- class_offset,
- kEmitCompilerReadBarrier);
__ jmp(type_check_slow_path->GetEntryLabel());
__ Bind(&done);
break;
@@ -6220,31 +6199,10 @@ void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) {
// to further check that this component type is not a primitive
// type.
__ testl(temp, temp);
- __ j(kNotEqual, &check_non_primitive_component_type);
// Otherwise, jump to the slow path to throw the exception.
- //
- // But before, move back the object's class into `temp` before
- // going into the slow path, as it has been overwritten in the
- // meantime.
- // /* HeapReference<Class> */ temp = obj->klass_
- GenerateReferenceLoadTwoRegisters(instruction,
- temp_loc,
- obj_loc,
- class_offset,
- kEmitCompilerReadBarrier);
- __ jmp(type_check_slow_path->GetEntryLabel());
-
- __ Bind(&check_non_primitive_component_type);
+ __ j(kZero, type_check_slow_path->GetEntryLabel());
__ cmpw(Address(temp, primitive_offset), Immediate(Primitive::kPrimNot));
- __ j(kEqual, &done);
- // Same comment as above regarding `temp` and the slow path.
- // /* HeapReference<Class> */ temp = obj->klass_
- GenerateReferenceLoadTwoRegisters(instruction,
- temp_loc,
- obj_loc,
- class_offset,
- kEmitCompilerReadBarrier);
- __ jmp(type_check_slow_path->GetEntryLabel());
+ __ j(kNotEqual, type_check_slow_path->GetEntryLabel());
__ Bind(&done);
break;
}
@@ -6316,13 +6274,6 @@ void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) {
__ j(kNotZero, &start_loop);
__ Bind(&is_null);
}
-
- // Since we clobbered temp_loc holding the class, we need to reload it.
- GenerateReferenceLoadTwoRegisters(instruction,
- temp_loc,
- obj_loc,
- class_offset,
- kEmitCompilerReadBarrier);
__ jmp(type_check_slow_path->GetEntryLabel());
__ Bind(&done);
break;
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index cb8edffb94..01b3f349d4 100644
--- a/runtime/arch/arm/entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -30,8 +30,7 @@
namespace art {
// Cast entrypoints.
-extern "C" size_t artIsAssignableFromCode(const mirror::Class* klass,
- const mirror::Class* ref_class);
+extern "C" size_t artIsAssignableFromCode(mirror::Class* klass, mirror::Class* ref_class);
// Read barrier entrypoints.
// art_quick_read_barrier_mark_regX uses an non-standard calling
@@ -73,7 +72,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
// Cast
qpoints->pInstanceofNonTrivial = artIsAssignableFromCode;
- qpoints->pCheckCast = art_quick_check_cast;
+ qpoints->pCheckInstanceOf = art_quick_check_instance_of;
// Math
qpoints->pIdivmod = __aeabi_idivmod;
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 0135260de8..550f8c7727 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -764,11 +764,12 @@ ENTRY art_quick_unlock_object_no_inline
END art_quick_unlock_object_no_inline
/*
- * Entry from managed code that calls artIsAssignableFromCode and on failure calls
- * artThrowClassCastException.
+ * Entry from managed code that calls artInstanceOfFromCode and on failure calls
+ * artThrowClassCastExceptionForObject.
*/
- .extern artThrowClassCastException
-ENTRY art_quick_check_cast
+ .extern artInstanceOfFromCode
+ .extern artThrowClassCastExceptionForObject
+ENTRY art_quick_check_instance_of
push {r0-r1, lr} @ save arguments, link register and pad
.cfi_adjust_cfa_offset 12
.cfi_rel_offset r0, 0
@@ -776,7 +777,7 @@ ENTRY art_quick_check_cast
.cfi_rel_offset lr, 8
sub sp, #4
.cfi_adjust_cfa_offset 4
- bl artIsAssignableFromCode
+ bl artInstanceOfFromCode
cbz r0, .Lthrow_class_cast_exception
add sp, #4
.cfi_adjust_cfa_offset -4
@@ -792,9 +793,9 @@ ENTRY art_quick_check_cast
.cfi_restore lr
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r2 @ save all registers as basis for long jump context
mov r2, r9 @ pass Thread::Current
- bl artThrowClassCastException @ (Class*, Class*, Thread*)
+ bl artThrowClassCastExceptionForObject @ (Object*, Class*, Thread*)
bkpt
-END art_quick_check_cast
+END art_quick_check_instance_of
// Restore rReg's value from [sp, #offset] if rReg is not the same as rExclude.
.macro POP_REG_NE rReg, offset, rExclude
diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc
index c2078f02c1..3c77672aac 100644
--- a/runtime/arch/arm64/entrypoints_init_arm64.cc
+++ b/runtime/arch/arm64/entrypoints_init_arm64.cc
@@ -30,8 +30,7 @@
namespace art {
// Cast entrypoints.
-extern "C" size_t artIsAssignableFromCode(const mirror::Class* klass,
- const mirror::Class* ref_class);
+extern "C" size_t artIsAssignableFromCode(mirror::Class* klass, mirror::Class* ref_class);
// Read barrier entrypoints.
// art_quick_read_barrier_mark_regX uses an non-standard calling
@@ -76,7 +75,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
// Cast
qpoints->pInstanceofNonTrivial = artIsAssignableFromCode;
- qpoints->pCheckCast = art_quick_check_cast;
+ qpoints->pCheckInstanceOf = art_quick_check_instance_of;
// Math
// TODO null entrypoints not needed for ARM64 - generate inline.
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index d806715ec9..d8ebe262bc 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1294,18 +1294,19 @@ ENTRY art_quick_unlock_object_no_inline
END art_quick_unlock_object_no_inline
/*
- * Entry from managed code that calls artIsAssignableFromCode and on failure calls
- * artThrowClassCastException.
+ * Entry from managed code that calls artInstanceOfFromCode and on failure calls
+ * artThrowClassCastExceptionForObject.
*/
- .extern artThrowClassCastException
-ENTRY art_quick_check_cast
+ .extern artInstanceOfFromCode
+ .extern artThrowClassCastExceptionForObject
+ENTRY art_quick_check_instance_of
// Store arguments and link register
// Stack needs to be 16B aligned on calls.
SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 32
SAVE_REG xLR, 24
// Call runtime code
- bl artIsAssignableFromCode
+ bl artInstanceOfFromCode
// Check for exception
cbz x0, .Lthrow_class_cast_exception
@@ -1324,9 +1325,9 @@ ENTRY art_quick_check_cast
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save all registers as basis for long jump context
mov x2, xSELF // pass Thread::Current
- bl artThrowClassCastException // (Class*, Class*, Thread*)
+ bl artThrowClassCastExceptionForObject // (Object*, Class*, Thread*)
brk 0 // We should not return here...
-END art_quick_check_cast
+END art_quick_check_instance_of
// Restore xReg's value from [sp, #offset] if xReg is not the same as xExclude.
.macro POP_REG_NE xReg, offset, xExclude
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index e10d4e6a74..e3230f65dd 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -30,8 +30,7 @@
namespace art {
// Cast entrypoints.
-extern "C" size_t artIsAssignableFromCode(const mirror::Class* klass,
- const mirror::Class* ref_class);
+extern "C" size_t artIsAssignableFromCode(mirror::Class* klass, mirror::Class* ref_class);
// Math entrypoints.
extern int32_t CmpgDouble(double a, double b);
@@ -73,8 +72,8 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
// Cast
qpoints->pInstanceofNonTrivial = artIsAssignableFromCode;
static_assert(IsDirectEntrypoint(kQuickInstanceofNonTrivial), "Direct C stub not marked direct.");
- qpoints->pCheckCast = art_quick_check_cast;
- static_assert(!IsDirectEntrypoint(kQuickCheckCast), "Non-direct C stub marked direct.");
+ qpoints->pCheckInstanceOf = art_quick_check_instance_of;
+ static_assert(!IsDirectEntrypoint(kQuickCheckInstanceOf), "Non-direct C stub marked direct.");
// DexCache
qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage;
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index c3c188233b..34e34b40ff 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -1171,10 +1171,11 @@ ENTRY art_quick_unlock_object_no_inline
END art_quick_unlock_object_no_inline
/*
- * Entry from managed code that calls artCheckCastFromCode and delivers exception on failure.
+ * Entry from managed code that calls artInstanceOfFromCode and delivers exception on failure.
*/
- .extern artThrowClassCastException
-ENTRY art_quick_check_cast
+ .extern artInstanceOfFromCode
+ .extern artThrowClassCastExceptionForObject
+ENTRY art_quick_check_instance_of
addiu $sp, $sp, -32
.cfi_adjust_cfa_offset 32
sw $gp, 16($sp)
@@ -1183,7 +1184,7 @@ ENTRY art_quick_check_cast
sw $t9, 8($sp)
sw $a1, 4($sp)
sw $a0, 0($sp)
- la $t9, artIsAssignableFromCode
+ la $t9, artInstanceOfFromCode
jalr $t9
addiu $sp, $sp, -16 # reserve argument slots on the stack
addiu $sp, $sp, 16
@@ -1200,10 +1201,10 @@ ENTRY art_quick_check_cast
addiu $sp, $sp, 32
.cfi_adjust_cfa_offset -32
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
- la $t9, artThrowClassCastException
- jalr $zero, $t9 # artThrowClassCastException (Class*, Class*, Thread*)
+ la $t9, artThrowClassCastExceptionForObject
+ jalr $zero, $t9 # artThrowClassCastExceptionForObject (Object*, Class*, Thread*)
move $a2, rSELF # pass Thread::Current
-END art_quick_check_cast
+END art_quick_check_instance_of
/*
* Restore rReg's value from offset($sp) if rReg is not the same as rExclude.
diff --git a/runtime/arch/mips64/entrypoints_init_mips64.cc b/runtime/arch/mips64/entrypoints_init_mips64.cc
index a0379053bc..43b73f127a 100644
--- a/runtime/arch/mips64/entrypoints_init_mips64.cc
+++ b/runtime/arch/mips64/entrypoints_init_mips64.cc
@@ -30,8 +30,8 @@
namespace art {
// Cast entrypoints.
-extern "C" size_t artIsAssignableFromCode(const mirror::Class* klass,
- const mirror::Class* ref_class);
+extern "C" size_t artIsAssignableFromCode(mirror::Class* klass, mirror::Class* ref_class);
+
// Math entrypoints.
extern int32_t CmpgDouble(double a, double b);
extern int32_t CmplDouble(double a, double b);
@@ -64,7 +64,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
// Cast
qpoints->pInstanceofNonTrivial = artIsAssignableFromCode;
- qpoints->pCheckCast = art_quick_check_cast;
+ qpoints->pCheckInstanceOf = art_quick_check_instance_of;
// Math
qpoints->pCmpgDouble = CmpgDouble;
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index cb2d1c816b..0861d2d73e 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -1256,10 +1256,11 @@ ENTRY_NO_GP art_quick_unlock_object_no_inline
END art_quick_unlock_object_no_inline
/*
- * Entry from managed code that calls artCheckCastFromCode and delivers exception on failure.
+ * Entry from managed code that calls artInstanceOfFromCode and delivers exception on failure.
*/
- .extern artThrowClassCastException
-ENTRY art_quick_check_cast
+ .extern artInstanceOfFromCode
+ .extern artThrowClassCastExceptionForObject
+ENTRY art_quick_check_instance_of
daddiu $sp, $sp, -32
.cfi_adjust_cfa_offset 32
sd $ra, 24($sp)
@@ -1267,7 +1268,7 @@ ENTRY art_quick_check_cast
sd $t9, 16($sp)
sd $a1, 8($sp)
sd $a0, 0($sp)
- jal artIsAssignableFromCode
+ jal artInstanceOfFromCode
.cpreturn # Restore gp from t8 in branch delay slot.
# t8 may be clobbered in artIsAssignableFromCode.
beq $v0, $zero, .Lthrow_class_cast_exception
@@ -1283,10 +1284,10 @@ ENTRY art_quick_check_cast
.cfi_adjust_cfa_offset -32
SETUP_GP
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
- dla $t9, artThrowClassCastException
- jalr $zero, $t9 # artThrowClassCastException (Class*, Class*, Thread*)
+ dla $t9, artThrowClassCastExceptionForObject
+ jalr $zero, $t9 # artThrowClassCastExceptionForObject (Object*, Class*, Thread*)
move $a2, rSELF # pass Thread::Current
-END art_quick_check_cast
+END art_quick_check_instance_of
/*
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index c5a2c75118..bbf9a8b93c 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -806,7 +806,7 @@ TEST_F(StubTest, UnlockObject) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
-extern "C" void art_quick_check_cast(void);
+extern "C" void art_quick_check_instance_of(void);
#endif
TEST_F(StubTest, CheckCast) {
@@ -814,65 +814,89 @@ TEST_F(StubTest, CheckCast) {
(defined(__x86_64__) && !defined(__APPLE__))
Thread* self = Thread::Current();
- const uintptr_t art_quick_check_cast = StubTest::GetEntrypoint(self, kQuickCheckCast);
+ const uintptr_t art_quick_check_instance_of =
+ StubTest::GetEntrypoint(self, kQuickCheckInstanceOf);
// Find some classes.
ScopedObjectAccess soa(self);
// garbage is created during ClassLinker::Init
- StackHandleScope<4> hs(soa.Self());
- Handle<mirror::Class> c(
- hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;")));
- Handle<mirror::Class> c2(
- hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/String;")));
- Handle<mirror::Class> list(
- hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/util/List;")));
- Handle<mirror::Class> array_list(
- hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/util/ArrayList;")));
+ VariableSizedHandleScope hs(soa.Self());
+ Handle<mirror::Class> klass_obj(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));
+ Handle<mirror::Class> klass_str(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/String;")));
+ Handle<mirror::Class> klass_list(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/util/List;")));
+ Handle<mirror::Class> klass_cloneable(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Cloneable;")));
+ Handle<mirror::Class> klass_array_list(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/util/ArrayList;")));
+ Handle<mirror::Object> obj(hs.NewHandle(klass_obj->AllocObject(soa.Self())));
+ Handle<mirror::String> string(hs.NewHandle(
+ mirror::String::AllocFromModifiedUtf8(soa.Self(), "ABCD")));
+ Handle<mirror::Object> array_list(hs.NewHandle(klass_array_list->AllocObject(soa.Self())));
EXPECT_FALSE(self->IsExceptionPending());
- Invoke3(reinterpret_cast<size_t>(c.Get()),
- reinterpret_cast<size_t>(c.Get()),
+ Invoke3(reinterpret_cast<size_t>(obj.Get()),
+ reinterpret_cast<size_t>(klass_obj.Get()),
0U,
- art_quick_check_cast,
+ art_quick_check_instance_of,
self);
EXPECT_FALSE(self->IsExceptionPending());
- Invoke3(reinterpret_cast<size_t>(c2.Get()),
- reinterpret_cast<size_t>(c2.Get()),
+ // Expected true: Test string instance of java.lang.String.
+ Invoke3(reinterpret_cast<size_t>(string.Get()),
+ reinterpret_cast<size_t>(klass_str.Get()),
0U,
- art_quick_check_cast,
+ art_quick_check_instance_of,
self);
EXPECT_FALSE(self->IsExceptionPending());
- Invoke3(reinterpret_cast<size_t>(c.Get()),
- reinterpret_cast<size_t>(c2.Get()),
+ // Expected true: Test string instance of java.lang.Object.
+ Invoke3(reinterpret_cast<size_t>(string.Get()),
+ reinterpret_cast<size_t>(klass_obj.Get()),
+ 0U,
+ art_quick_check_instance_of,
+ self);
+ EXPECT_FALSE(self->IsExceptionPending());
+
+ // Expected false: Test object instance of java.lang.String.
+ Invoke3(reinterpret_cast<size_t>(obj.Get()),
+ reinterpret_cast<size_t>(klass_str.Get()),
+ 0U,
+ art_quick_check_instance_of,
+ self);
+ EXPECT_TRUE(self->IsExceptionPending());
+ self->ClearException();
+
+ Invoke3(reinterpret_cast<size_t>(array_list.Get()),
+ reinterpret_cast<size_t>(klass_list.Get()),
0U,
- art_quick_check_cast,
+ art_quick_check_instance_of,
self);
EXPECT_FALSE(self->IsExceptionPending());
- Invoke3(reinterpret_cast<size_t>(list.Get()),
- reinterpret_cast<size_t>(array_list.Get()),
+ Invoke3(reinterpret_cast<size_t>(array_list.Get()),
+ reinterpret_cast<size_t>(klass_cloneable.Get()),
0U,
- art_quick_check_cast,
+ art_quick_check_instance_of,
self);
EXPECT_FALSE(self->IsExceptionPending());
- Invoke3(reinterpret_cast<size_t>(list.Get()),
- reinterpret_cast<size_t>(c2.Get()),
+ Invoke3(reinterpret_cast<size_t>(string.Get()),
+ reinterpret_cast<size_t>(klass_array_list.Get()),
0U,
- art_quick_check_cast,
+ art_quick_check_instance_of,
self);
EXPECT_TRUE(self->IsExceptionPending());
self->ClearException();
- // TODO: Make the following work. But that would require correct managed frames.
- Invoke3(reinterpret_cast<size_t>(c2.Get()),
- reinterpret_cast<size_t>(c.Get()),
+ Invoke3(reinterpret_cast<size_t>(string.Get()),
+ reinterpret_cast<size_t>(klass_cloneable.Get()),
0U,
- art_quick_check_cast,
+ art_quick_check_instance_of,
self);
EXPECT_TRUE(self->IsExceptionPending());
self->ClearException();
diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc
index 0a10a3cceb..877df8f7b0 100644
--- a/runtime/arch/x86/entrypoints_init_x86.cc
+++ b/runtime/arch/x86/entrypoints_init_x86.cc
@@ -27,8 +27,7 @@
namespace art {
// Cast entrypoints.
-extern "C" size_t art_quick_is_assignable(const mirror::Class* klass,
- const mirror::Class* ref_class);
+extern "C" size_t art_quick_is_assignable(mirror::Class* klass, mirror::Class* ref_class);
// Read barrier entrypoints.
// art_quick_read_barrier_mark_regX uses an non-standard calling
@@ -50,7 +49,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
// Cast
qpoints->pInstanceofNonTrivial = art_quick_is_assignable;
- qpoints->pCheckCast = art_quick_check_cast;
+ qpoints->pCheckInstanceOf = art_quick_check_instance_of;
// More math.
qpoints->pCos = cos;
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 98739d3cc8..635bfa3c2c 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1361,11 +1361,11 @@ DEFINE_FUNCTION art_quick_is_assignable
ret
END_FUNCTION art_quick_is_assignable
-DEFINE_FUNCTION art_quick_check_cast
+DEFINE_FUNCTION art_quick_check_instance_of
PUSH eax // alignment padding
- PUSH ecx // pass arg2 - obj->klass
- PUSH eax // pass arg1 - checked class
- call SYMBOL(artIsAssignableFromCode) // (Class* klass, Class* ref_klass)
+ PUSH ecx // pass arg2 - checked class
+ PUSH eax // pass arg1 - obj
+ call SYMBOL(artInstanceOfFromCode) // (Object* obj, Class* ref_klass)
testl %eax, %eax
jz 1f // jump forward if not assignable
addl LITERAL(12), %esp // pop arguments
@@ -1385,9 +1385,9 @@ DEFINE_FUNCTION art_quick_check_cast
CFI_ADJUST_CFA_OFFSET(4)
PUSH ecx // pass arg2
PUSH eax // pass arg1
- call SYMBOL(artThrowClassCastException) // (Class* a, Class* b, Thread*)
+ call SYMBOL(artThrowClassCastExceptionForObject) // (Object* src, Class* dest, Thread*)
UNREACHABLE
-END_FUNCTION art_quick_check_cast
+END_FUNCTION art_quick_check_instance_of
// Restore reg's value if reg is not the same as exclude_reg, otherwise just adjust stack.
MACRO2(POP_REG_NE, reg, exclude_reg)
diff --git a/runtime/arch/x86_64/entrypoints_init_x86_64.cc b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
index 8c425d53d3..59c9dfeb6f 100644
--- a/runtime/arch/x86_64/entrypoints_init_x86_64.cc
+++ b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
@@ -30,8 +30,7 @@
namespace art {
// Cast entrypoints.
-extern "C" size_t art_quick_assignable_from_code(const mirror::Class* klass,
- const mirror::Class* ref_class);
+extern "C" size_t art_quick_assignable_from_code(mirror::Class* klass, mirror::Class* ref_class);
// Read barrier entrypoints.
// art_quick_read_barrier_mark_regX uses an non-standard calling
@@ -65,7 +64,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
// Cast
qpoints->pInstanceofNonTrivial = art_quick_assignable_from_code;
- qpoints->pCheckCast = art_quick_check_cast;
+ qpoints->pCheckInstanceOf = art_quick_check_instance_of;
// More math.
qpoints->pCos = cos;
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 185e55e114..72a03ebcbe 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -1480,14 +1480,14 @@ DEFINE_FUNCTION art_quick_unlock_object_no_inline
RETURN_IF_EAX_ZERO
END_FUNCTION art_quick_unlock_object_no_inline
-DEFINE_FUNCTION art_quick_check_cast
+DEFINE_FUNCTION art_quick_check_instance_of
// We could check the super classes here but that is usually already checked in the caller.
PUSH rdi // Save args for exc
PUSH rsi
subq LITERAL(8), %rsp // Alignment padding.
CFI_ADJUST_CFA_OFFSET(8)
SETUP_FP_CALLEE_SAVE_FRAME
- call SYMBOL(artIsAssignableFromCode) // (Class* klass, Class* ref_klass)
+ call SYMBOL(artInstanceOfFromCode) // (Object* obj, Class* ref_klass)
testq %rax, %rax
jz 1f // jump forward if not assignable
RESTORE_FP_CALLEE_SAVE_FRAME
@@ -1506,9 +1506,9 @@ DEFINE_FUNCTION art_quick_check_cast
POP rdi
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save all registers as basis for long jump context
mov %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
- call SYMBOL(artThrowClassCastException) // (Class* a, Class* b, Thread*)
+ call SYMBOL(artThrowClassCastExceptionForObject) // (Object* src, Class* dest, Thread*)
UNREACHABLE
-END_FUNCTION art_quick_check_cast
+END_FUNCTION art_quick_check_instance_of
// Restore reg's value if reg is not the same as exclude_reg, otherwise just adjust stack.
diff --git a/runtime/entrypoints/quick/quick_cast_entrypoints.cc b/runtime/entrypoints/quick/quick_cast_entrypoints.cc
index 2732d687b5..083d5786ce 100644
--- a/runtime/entrypoints/quick/quick_cast_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_cast_entrypoints.cc
@@ -27,4 +27,12 @@ extern "C" size_t artIsAssignableFromCode(mirror::Class* klass, mirror::Class* r
return klass->IsAssignableFrom(ref_class) ? 1 : 0;
}
+// Instance-of test for code, won't throw. Null and equality tests already performed.
+extern "C" size_t artInstanceOfFromCode(mirror::Object* obj, mirror::Class* ref_class)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(obj != nullptr);
+ DCHECK(ref_class != nullptr);
+ return obj->InstanceOf(ref_class) ? 1 : 0;
+}
+
} // namespace art
diff --git a/runtime/entrypoints/quick/quick_default_externs.h b/runtime/entrypoints/quick/quick_default_externs.h
index cfa5325e45..64030f36bc 100644
--- a/runtime/entrypoints/quick/quick_default_externs.h
+++ b/runtime/entrypoints/quick/quick_default_externs.h
@@ -31,7 +31,7 @@ class ArtMethod;
// These are extern declarations of assembly stubs with common names.
// Cast entrypoints.
-extern "C" void art_quick_check_cast(const art::mirror::Class*, const art::mirror::Class*);
+extern "C" void art_quick_check_instance_of(art::mirror::Object*, art::mirror::Class*);
// DexCache entrypoints.
extern "C" void* art_quick_initialize_static_storage(uint32_t);
diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h
index 3cfee45462..dd8fe55420 100644
--- a/runtime/entrypoints/quick/quick_entrypoints_list.h
+++ b/runtime/entrypoints/quick/quick_entrypoints_list.h
@@ -33,8 +33,8 @@
V(AllocStringFromChars, void*, int32_t, int32_t, void*) \
V(AllocStringFromString, void*, void*) \
\
- V(InstanceofNonTrivial, size_t, const mirror::Class*, const mirror::Class*) \
- V(CheckCast, void, const mirror::Class*, const mirror::Class*) \
+ V(InstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*) \
+ V(CheckInstanceOf, void, mirror::Object*, mirror::Class*) \
\
V(InitializeStaticStorage, void*, uint32_t) \
V(InitializeTypeAndVerifyAccess, void*, uint32_t) \
diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
index a205b17f1b..c8ee99a5d9 100644
--- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
@@ -111,6 +111,14 @@ extern "C" NO_RETURN void artThrowClassCastException(mirror::Class* dest_type,
self->QuickDeliverException();
}
+extern "C" NO_RETURN void artThrowClassCastExceptionForObject(mirror::Object* obj,
+ mirror::Class* dest_type,
+ Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(obj != nullptr);
+ artThrowClassCastException(dest_type, obj->GetClass(), self);
+}
+
extern "C" NO_RETURN void artThrowArrayStoreException(mirror::Object* array, mirror::Object* value,
Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index cdb1051e08..b0463d7f11 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -174,8 +174,9 @@ class EntrypointsOrderTest : public CommonRuntimeTest {
sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocStringFromString, pInstanceofNonTrivial,
sizeof(void*));
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInstanceofNonTrivial, pCheckCast, sizeof(void*));
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCheckCast, pInitializeStaticStorage, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInstanceofNonTrivial, pCheckInstanceOf, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCheckInstanceOf, pInitializeStaticStorage,
+ sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInitializeStaticStorage, pInitializeTypeAndVerifyAccess,
sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInitializeTypeAndVerifyAccess, pInitializeType,
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 1e3d4db7d7..bd5edaa421 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -2503,7 +2503,7 @@ void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) {
QUICK_ENTRY_POINT_INFO(pAllocStringFromChars)
QUICK_ENTRY_POINT_INFO(pAllocStringFromString)
QUICK_ENTRY_POINT_INFO(pInstanceofNonTrivial)
- QUICK_ENTRY_POINT_INFO(pCheckCast)
+ QUICK_ENTRY_POINT_INFO(pCheckInstanceOf)
QUICK_ENTRY_POINT_INFO(pInitializeStaticStorage)
QUICK_ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccess)
QUICK_ENTRY_POINT_INFO(pInitializeType)