Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/builder.cc                54
-rw-r--r--  compiler/optimizing/code_generator_arm.cc     313
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc   348
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc    4
-rw-r--r--  compiler/optimizing/code_generator_x86.cc     363
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc  366
-rw-r--r--  compiler/optimizing/graph_checker.cc           27
-rw-r--r--  compiler/optimizing/intrinsics_x86.cc          78
-rw-r--r--  compiler/optimizing/intrinsics_x86_64.cc       69
-rw-r--r--  compiler/optimizing/nodes.h                    42
-rw-r--r--  compiler/optimizing/ssa_builder.cc             26
11 files changed, 1421 insertions(+), 269 deletions(-)
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 3012346a95..274a2a699f 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -1614,25 +1614,48 @@ void HGraphBuilder::BuildFillWideArrayData(HInstruction* object,
}
}
+static TypeCheckKind ComputeTypeCheckKind(Handle<mirror::Class> cls)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (cls->IsInterface()) {
+ return TypeCheckKind::kInterfaceCheck;
+ } else if (cls->IsArrayClass()) {
+ if (cls->GetComponentType()->IsObjectClass()) {
+ return TypeCheckKind::kArrayObjectCheck;
+ } else if (cls->CannotBeAssignedFromOtherTypes()) {
+ return TypeCheckKind::kExactCheck;
+ } else {
+ return TypeCheckKind::kArrayCheck;
+ }
+ } else if (cls->IsFinal()) {
+ return TypeCheckKind::kExactCheck;
+ } else if (cls->IsAbstract()) {
+ return TypeCheckKind::kAbstractClassCheck;
+ } else {
+ return TypeCheckKind::kClassHierarchyCheck;
+ }
+}
+
bool HGraphBuilder::BuildTypeCheck(const Instruction& instruction,
uint8_t destination,
uint8_t reference,
uint16_t type_index,
uint32_t dex_pc) {
- bool type_known_final;
- bool type_known_abstract;
- // `CanAccessTypeWithoutChecks` will tell whether the method being
- // built is trying to access its own class, so that the generated
- // code can optimize for this case. However, the optimization does not
- // work for inlining, so we use `IsOutermostCompilingClass` instead.
- bool dont_use_is_referrers_class;
- bool can_access = compiler_driver_->CanAccessTypeWithoutChecks(
- dex_compilation_unit_->GetDexMethodIndex(), *dex_file_, type_index,
- &type_known_final, &type_known_abstract, &dont_use_is_referrers_class);
- if (!can_access) {
+ ScopedObjectAccess soa(Thread::Current());
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(
+ dex_compilation_unit_->GetClassLinker()->FindDexCache(
+ soa.Self(), *dex_compilation_unit_->GetDexFile())));
+ Handle<mirror::Class> resolved_class(hs.NewHandle(dex_cache->GetResolvedType(type_index)));
+
+ if ((resolved_class.Get() == nullptr) ||
+ // TODO: Remove this check once the compiler actually knows which
+ // ArtMethod it is compiling.
+ (GetCompilingClass() == nullptr) ||
+ !GetCompilingClass()->CanAccess(resolved_class.Get())) {
MaybeRecordStat(MethodCompilationStat::kNotCompiledCantAccesType);
return false;
}
+
HInstruction* object = LoadLocal(reference, Primitive::kPrimNot, dex_pc);
HLoadClass* cls = new (arena_) HLoadClass(
graph_->GetCurrentMethod(),
@@ -1641,17 +1664,18 @@ bool HGraphBuilder::BuildTypeCheck(const Instruction& instruction,
IsOutermostCompilingClass(type_index),
dex_pc);
current_block_->AddInstruction(cls);
+
// The class needs a temporary before being used by the type check.
Temporaries temps(graph_);
temps.Add(cls);
+
+ TypeCheckKind check_kind = ComputeTypeCheckKind(resolved_class);
if (instruction.Opcode() == Instruction::INSTANCE_OF) {
- current_block_->AddInstruction(
- new (arena_) HInstanceOf(object, cls, type_known_final, dex_pc));
+ current_block_->AddInstruction(new (arena_) HInstanceOf(object, cls, check_kind, dex_pc));
UpdateLocal(destination, current_block_->GetLastInstruction(), dex_pc);
} else {
DCHECK_EQ(instruction.Opcode(), Instruction::CHECK_CAST);
- current_block_->AddInstruction(
- new (arena_) HCheckCast(object, cls, type_known_final, dex_pc));
+ current_block_->AddInstruction(new (arena_) HCheckCast(object, cls, check_kind, dex_pc));
}
return true;
}
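For reference, ComputeTypeCheckKind above classifies the resolved class into one of six check kinds. The enum itself is declared in compiler/optimizing/nodes.h (part of this change but not shown in this excerpt), so the sketch below only restates the kinds as they are used in this diff, with illustrative comments; it is not the literal declaration.

// Sketch of the TypeCheckKind values driving the codegen below (see nodes.h for the real one).
enum class TypeCheckKind {
  kExactCheck,           // final class, or array type that cannot be assigned from other types
  kAbstractClassCheck,   // abstract class: walk the super chain, skipping the first compare
  kClassHierarchyCheck,  // plain non-final class: walk the super chain until match or null
  kArrayObjectCheck,     // Object[]: only the component type needs to be a reference type
  kArrayCheck,           // other array types: inline equality check, otherwise slow path
  kInterfaceCheck,       // interface: always resolved through the runtime entrypoint
};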
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 55c9214b6c..d431acfb53 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -271,7 +271,8 @@ class LoadStringSlowPathARM : public SlowPathCode {
class TypeCheckSlowPathARM : public SlowPathCode {
public:
- explicit TypeCheckSlowPathARM(HInstruction* instruction) : instruction_(instruction) {}
+ TypeCheckSlowPathARM(HInstruction* instruction, bool is_fatal)
+ : instruction_(instruction), is_fatal_(is_fatal) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
@@ -282,7 +283,19 @@ class TypeCheckSlowPathARM : public SlowPathCode {
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
- SaveLiveRegisters(codegen, locations);
+
+ if (instruction_->IsCheckCast()) {
+ // The codegen for the instruction overwrites `temp`, so put it back in place.
+ Register obj = locations->InAt(0).AsRegister<Register>();
+ Register temp = locations->GetTemp(0).AsRegister<Register>();
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ __ LoadFromOffset(kLoadWord, temp, obj, class_offset);
+ __ MaybeUnpoisonHeapReference(temp);
+ }
+
+ if (!is_fatal_) {
+ SaveLiveRegisters(codegen, locations);
+ }
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
@@ -309,14 +322,19 @@ class TypeCheckSlowPathARM : public SlowPathCode {
this);
}
- RestoreLiveRegisters(codegen, locations);
- __ b(GetExitLabel());
+ if (!is_fatal_) {
+ RestoreLiveRegisters(codegen, locations);
+ __ b(GetExitLabel());
+ }
}
const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathARM"; }
+ bool IsFatal() const OVERRIDE { return is_fatal_; }
+
private:
HInstruction* const instruction_;
+ const bool is_fatal_;
DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM);
};
@@ -4357,15 +4375,34 @@ void InstructionCodeGeneratorARM::VisitThrow(HThrow* instruction) {
}
void LocationsBuilderARM::VisitInstanceOf(HInstanceOf* instruction) {
- LocationSummary::CallKind call_kind = instruction->IsClassFinal()
- ? LocationSummary::kNoCall
- : LocationSummary::kCallOnSlowPath;
+ LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
+ switch (instruction->GetTypeCheckKind()) {
+ case TypeCheckKind::kExactCheck:
+ case TypeCheckKind::kAbstractClassCheck:
+ case TypeCheckKind::kClassHierarchyCheck:
+ case TypeCheckKind::kArrayObjectCheck:
+ call_kind = LocationSummary::kNoCall;
+ break;
+ case TypeCheckKind::kInterfaceCheck:
+ call_kind = LocationSummary::kCall;
+ break;
+ case TypeCheckKind::kArrayCheck:
+ call_kind = LocationSummary::kCallOnSlowPath;
+ break;
+ }
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
- // The out register is used as a temporary, so it overlaps with the inputs.
- // Note that TypeCheckSlowPathARM uses this register too.
- locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+ if (call_kind != LocationSummary::kCall) {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ // The out register is used as a temporary, so it overlaps with the inputs.
+ // Note that TypeCheckSlowPathARM uses this register too.
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+ } else {
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetOut(Location::RegisterLocation(R0));
+ }
}
void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
@@ -4374,6 +4411,9 @@ void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
Register cls = locations->InAt(1).AsRegister<Register>();
Register out = locations->Out().AsRegister<Register>();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+ uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
+ uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
Label done, zero;
SlowPathCode* slow_path = nullptr;
@@ -4382,67 +4422,242 @@ void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
if (instruction->MustDoNullCheck()) {
__ CompareAndBranchIfZero(obj, &zero);
}
- // Compare the class of `obj` with `cls`.
- __ LoadFromOffset(kLoadWord, out, obj, class_offset);
- __ MaybeUnpoisonHeapReference(out);
- __ cmp(out, ShifterOperand(cls));
- if (instruction->IsClassFinal()) {
- // Classes must be equal for the instanceof to succeed.
- __ b(&zero, NE);
- __ LoadImmediate(out, 1);
- __ b(&done);
- } else {
- // If the classes are not equal, we go into a slow path.
- DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(instruction);
- codegen_->AddSlowPath(slow_path);
- __ b(slow_path->GetEntryLabel(), NE);
- __ LoadImmediate(out, 1);
- __ b(&done);
+
+ // In case of an interface check, we put the object class into the object register.
+ // This is safe, as the register is caller-save, and the object must be in another
+ // register if it survives the runtime call.
+ Register target = (instruction->GetTypeCheckKind() == TypeCheckKind::kInterfaceCheck)
+ ? obj
+ : out;
+ __ LoadFromOffset(kLoadWord, target, obj, class_offset);
+ __ MaybeUnpoisonHeapReference(target);
+
+ switch (instruction->GetTypeCheckKind()) {
+ case TypeCheckKind::kExactCheck: {
+ __ cmp(out, ShifterOperand(cls));
+ // Classes must be equal for the instanceof to succeed.
+ __ b(&zero, NE);
+ __ LoadImmediate(out, 1);
+ __ b(&done);
+ break;
+ }
+ case TypeCheckKind::kAbstractClassCheck: {
+ // If the class is abstract, we eagerly fetch the super class of the
+ // object to avoid doing a comparison we know will fail.
+ Label loop;
+ __ Bind(&loop);
+ __ LoadFromOffset(kLoadWord, out, out, super_offset);
+ __ MaybeUnpoisonHeapReference(out);
+ // If `out` is null, we use it for the result, and jump to `done`.
+ __ CompareAndBranchIfZero(out, &done);
+ __ cmp(out, ShifterOperand(cls));
+ __ b(&loop, NE);
+ __ LoadImmediate(out, 1);
+ if (zero.IsLinked()) {
+ __ b(&done);
+ }
+ break;
+ }
+ case TypeCheckKind::kClassHierarchyCheck: {
+ // Walk over the class hierarchy to find a match.
+ Label loop, success;
+ __ Bind(&loop);
+ __ cmp(out, ShifterOperand(cls));
+ __ b(&success, EQ);
+ __ LoadFromOffset(kLoadWord, out, out, super_offset);
+ __ MaybeUnpoisonHeapReference(out);
+ __ CompareAndBranchIfNonZero(out, &loop);
+ // If `out` is null, we use it for the result, and jump to `done`.
+ __ b(&done);
+ __ Bind(&success);
+ __ LoadImmediate(out, 1);
+ if (zero.IsLinked()) {
+ __ b(&done);
+ }
+ break;
+ }
+ case TypeCheckKind::kArrayObjectCheck: {
+ // Just need to check that the object's class is a non primitive array.
+ __ LoadFromOffset(kLoadWord, out, out, component_offset);
+ __ MaybeUnpoisonHeapReference(out);
+ // If `out` is null, we use it for the result, and jump to `done`.
+ __ CompareAndBranchIfZero(out, &done);
+ __ LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset);
+ static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
+ __ CompareAndBranchIfNonZero(out, &zero);
+ __ LoadImmediate(out, 1);
+ __ b(&done);
+ break;
+ }
+ case TypeCheckKind::kArrayCheck: {
+ __ cmp(out, ShifterOperand(cls));
+ DCHECK(locations->OnlyCallsOnSlowPath());
+ slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
+ instruction, /* is_fatal */ false);
+ codegen_->AddSlowPath(slow_path);
+ __ b(slow_path->GetEntryLabel(), NE);
+ __ LoadImmediate(out, 1);
+ if (zero.IsLinked()) {
+ __ b(&done);
+ }
+ break;
+ }
+
+ case TypeCheckKind::kInterfaceCheck:
+ default: {
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr);
+ if (zero.IsLinked()) {
+ __ b(&done);
+ }
+ break;
+ }
}
- if (instruction->MustDoNullCheck() || instruction->IsClassFinal()) {
+ if (zero.IsLinked()) {
__ Bind(&zero);
__ LoadImmediate(out, 0);
}
+ if (done.IsLinked()) {
+ __ Bind(&done);
+ }
+
if (slow_path != nullptr) {
__ Bind(slow_path->GetExitLabel());
}
- __ Bind(&done);
}
void LocationsBuilderARM::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
+ bool throws_into_catch = instruction->CanThrowIntoCatchBlock();
+
+ switch (instruction->GetTypeCheckKind()) {
+ case TypeCheckKind::kExactCheck:
+ case TypeCheckKind::kAbstractClassCheck:
+ case TypeCheckKind::kClassHierarchyCheck:
+ case TypeCheckKind::kArrayObjectCheck:
+ call_kind = throws_into_catch
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ break;
+ case TypeCheckKind::kInterfaceCheck:
+ call_kind = LocationSummary::kCall;
+ break;
+ case TypeCheckKind::kArrayCheck:
+ call_kind = LocationSummary::kCallOnSlowPath;
+ break;
+ }
+
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
- instruction, LocationSummary::kCallOnSlowPath);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
- // Note that TypeCheckSlowPathARM uses this register too.
- locations->AddTemp(Location::RequiresRegister());
+ instruction, call_kind);
+ if (call_kind != LocationSummary::kCall) {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ // Note that TypeCheckSlowPathARM uses this register too.
+ locations->AddTemp(Location::RequiresRegister());
+ } else {
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ }
}
void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) {
LocationSummary* locations = instruction->GetLocations();
Register obj = locations->InAt(0).AsRegister<Register>();
Register cls = locations->InAt(1).AsRegister<Register>();
- Register temp = locations->GetTemp(0).AsRegister<Register>();
+ Register temp = locations->WillCall()
+ ? Register(kNoRegister)
+ : locations->GetTemp(0).AsRegister<Register>();
+
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+ uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
+ uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
+ SlowPathCode* slow_path = nullptr;
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(instruction);
- codegen_->AddSlowPath(slow_path);
+ if (!locations->WillCall()) {
+ slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
+ instruction, !locations->CanCall());
+ codegen_->AddSlowPath(slow_path);
+ }
- // avoid null check if we know obj is not null.
+ Label done;
+ // Avoid null check if we know obj is not null.
if (instruction->MustDoNullCheck()) {
- __ CompareAndBranchIfZero(obj, slow_path->GetExitLabel());
+ __ CompareAndBranchIfZero(obj, &done);
+ }
+
+ if (locations->WillCall()) {
+ __ LoadFromOffset(kLoadWord, obj, obj, class_offset);
+ __ MaybeUnpoisonHeapReference(obj);
+ } else {
+ __ LoadFromOffset(kLoadWord, temp, obj, class_offset);
+ __ MaybeUnpoisonHeapReference(temp);
+ }
+
+ switch (instruction->GetTypeCheckKind()) {
+ case TypeCheckKind::kExactCheck:
+ case TypeCheckKind::kArrayCheck: {
+ __ cmp(temp, ShifterOperand(cls));
+ // Jump to slow path for throwing the exception or doing a
+ // more involved array check.
+ __ b(slow_path->GetEntryLabel(), NE);
+ break;
+ }
+ case TypeCheckKind::kAbstractClassCheck: {
+ // If the class is abstract, we eagerly fetch the super class of the
+ // object to avoid doing a comparison we know will fail.
+ Label loop;
+ __ Bind(&loop);
+ __ LoadFromOffset(kLoadWord, temp, temp, super_offset);
+ __ MaybeUnpoisonHeapReference(temp);
+ // Jump to the slow path to throw the exception.
+ __ CompareAndBranchIfZero(temp, slow_path->GetEntryLabel());
+ __ cmp(temp, ShifterOperand(cls));
+ __ b(&loop, NE);
+ break;
+ }
+ case TypeCheckKind::kClassHierarchyCheck: {
+ // Walk over the class hierarchy to find a match.
+ Label loop, success;
+ __ Bind(&loop);
+ __ cmp(temp, ShifterOperand(cls));
+ __ b(&success, EQ);
+ __ LoadFromOffset(kLoadWord, temp, temp, super_offset);
+ __ MaybeUnpoisonHeapReference(temp);
+ __ CompareAndBranchIfNonZero(temp, &loop);
+ // Jump to the slow path to throw the exception.
+ __ b(slow_path->GetEntryLabel());
+ __ Bind(&success);
+ break;
+ }
+ case TypeCheckKind::kArrayObjectCheck: {
+ // Just need to check that the object's class is a non primitive array.
+ __ LoadFromOffset(kLoadWord, temp, temp, component_offset);
+ __ MaybeUnpoisonHeapReference(temp);
+ __ CompareAndBranchIfZero(temp, slow_path->GetEntryLabel());
+ __ LoadFromOffset(kLoadUnsignedHalfword, temp, temp, primitive_offset);
+ static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
+ __ CompareAndBranchIfNonZero(temp, slow_path->GetEntryLabel());
+ break;
+ }
+ case TypeCheckKind::kInterfaceCheck:
+ default:
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr);
+ break;
+ }
+ __ Bind(&done);
+
+ if (slow_path != nullptr) {
+ __ Bind(slow_path->GetExitLabel());
}
- // Compare the class of `obj` with `cls`.
- __ LoadFromOffset(kLoadWord, temp, obj, class_offset);
- __ MaybeUnpoisonHeapReference(temp);
- __ cmp(temp, ShifterOperand(cls));
- // The checkcast succeeds if the classes are equal (fast path).
- // Otherwise, we need to go into the slow path to check the types.
- __ b(slow_path->GetEntryLabel(), NE);
- __ Bind(slow_path->GetExitLabel());
}
void LocationsBuilderARM::VisitMonitorOperation(HMonitorOperation* instruction) {
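The kClassHierarchyCheck and kAbstractClassCheck fast paths above both walk the super-class chain entirely in registers. As a self-contained C++ sketch of what the emitted loop computes (Klass stands in for mirror::Class, and GetSuperClass() for the load through super_offset; this is a restatement, not code from the tree):

// Minimal sketch of the inlined super-chain walk.
struct Klass {
  Klass* super;
  const Klass* GetSuperClass() const { return super; }
};

bool InstanceOfViaSuperChain(const Klass* obj_class, const Klass* cls) {
  for (const Klass* k = obj_class; k != nullptr; k = k->GetSuperClass()) {
    if (k == cls) return true;  // kClassHierarchyCheck: compare, then follow super
  }
  return false;                 // reaching null means the instanceof/checkcast fails
}
// kAbstractClassCheck is the same walk, except it loads the super class before the
// first compare, since an abstract `cls` can never equal the object's exact class.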
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 531b66927b..580e93e9c4 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -409,7 +409,8 @@ class SuspendCheckSlowPathARM64 : public SlowPathCodeARM64 {
class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
public:
- explicit TypeCheckSlowPathARM64(HInstruction* instruction) : instruction_(instruction) {}
+ TypeCheckSlowPathARM64(HInstruction* instruction, bool is_fatal)
+ : instruction_(instruction), is_fatal_(is_fatal) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
@@ -422,7 +423,19 @@ class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
uint32_t dex_pc = instruction_->GetDexPc();
__ Bind(GetEntryLabel());
- SaveLiveRegisters(codegen, locations);
+
+ if (instruction_->IsCheckCast()) {
+ // The codegen for the instruction overwrites `temp`, so put it back in place.
+ Register obj = InputRegisterAt(instruction_, 0);
+ Register temp = WRegisterFrom(locations->GetTemp(0));
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ __ Ldr(temp, HeapOperand(obj, class_offset));
+ arm64_codegen->GetAssembler()->MaybeUnpoisonHeapReference(temp);
+ }
+
+ if (!is_fatal_) {
+ SaveLiveRegisters(codegen, locations);
+ }
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
@@ -445,14 +458,18 @@ class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
}
- RestoreLiveRegisters(codegen, locations);
- __ B(GetExitLabel());
+ if (!is_fatal_) {
+ RestoreLiveRegisters(codegen, locations);
+ __ B(GetExitLabel());
+ }
}
const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathARM64"; }
+ bool IsFatal() const { return is_fatal_; }
private:
HInstruction* const instruction_;
+ const bool is_fatal_;
DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM64);
};
@@ -1629,38 +1646,6 @@ void InstructionCodeGeneratorARM64::VisitBoundsCheck(HBoundsCheck* instruction)
__ B(slow_path->GetEntryLabel(), hs);
}
-void LocationsBuilderARM64::VisitCheckCast(HCheckCast* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
- instruction, LocationSummary::kCallOnSlowPath);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
- // Note that TypeCheckSlowPathARM64 uses this register too.
- locations->AddTemp(Location::RequiresRegister());
-}
-
-void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) {
- Register obj = InputRegisterAt(instruction, 0);;
- Register cls = InputRegisterAt(instruction, 1);;
- Register obj_cls = WRegisterFrom(instruction->GetLocations()->GetTemp(0));
-
- SlowPathCodeARM64* slow_path =
- new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(instruction);
- codegen_->AddSlowPath(slow_path);
-
- // Avoid null check if we know obj is not null.
- if (instruction->MustDoNullCheck()) {
- __ Cbz(obj, slow_path->GetExitLabel());
- }
- // Compare the class of `obj` with `cls`.
- __ Ldr(obj_cls, HeapOperand(obj, mirror::Object::ClassOffset()));
- GetAssembler()->MaybeUnpoisonHeapReference(obj_cls.W());
- __ Cmp(obj_cls, cls);
- // The checkcast succeeds if the classes are equal (fast path).
- // Otherwise, we need to go into the slow path to check the types.
- __ B(ne, slow_path->GetEntryLabel());
- __ Bind(slow_path->GetExitLabel());
-}
-
void LocationsBuilderARM64::VisitClinitCheck(HClinitCheck* check) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
@@ -2254,50 +2239,291 @@ void InstructionCodeGeneratorARM64::VisitInstanceFieldSet(HInstanceFieldSet* ins
}
void LocationsBuilderARM64::VisitInstanceOf(HInstanceOf* instruction) {
- LocationSummary::CallKind call_kind =
- instruction->IsClassFinal() ? LocationSummary::kNoCall : LocationSummary::kCallOnSlowPath;
+ LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
+ switch (instruction->GetTypeCheckKind()) {
+ case TypeCheckKind::kExactCheck:
+ case TypeCheckKind::kAbstractClassCheck:
+ case TypeCheckKind::kClassHierarchyCheck:
+ case TypeCheckKind::kArrayObjectCheck:
+ call_kind = LocationSummary::kNoCall;
+ break;
+ case TypeCheckKind::kInterfaceCheck:
+ call_kind = LocationSummary::kCall;
+ break;
+ case TypeCheckKind::kArrayCheck:
+ call_kind = LocationSummary::kCallOnSlowPath;
+ break;
+ }
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
- // The output does overlap inputs.
- // Note that TypeCheckSlowPathARM64 uses this register too.
- locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+ if (call_kind != LocationSummary::kCall) {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ // The out register is used as a temporary, so it overlaps with the inputs.
+ // Note that TypeCheckSlowPathARM64 uses this register too.
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+ } else {
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(1)));
+ locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimInt));
+ }
}
void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary* locations = instruction->GetLocations();
- Register obj = InputRegisterAt(instruction, 0);;
- Register cls = InputRegisterAt(instruction, 1);;
+ Register obj = InputRegisterAt(instruction, 0);
+ Register cls = InputRegisterAt(instruction, 1);
Register out = OutputRegister(instruction);
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+ uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
+ uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
- vixl::Label done;
+ vixl::Label done, zero;
+ SlowPathCodeARM64* slow_path = nullptr;
// Return 0 if `obj` is null.
// Avoid null check if we know `obj` is not null.
if (instruction->MustDoNullCheck()) {
+ __ Cbz(obj, &zero);
+ }
+
+ // In case of an interface check, we put the object class into the object register.
+ // This is safe, as the register is caller-save, and the object must be in another
+ // register if it survives the runtime call.
+ Register target = (instruction->GetTypeCheckKind() == TypeCheckKind::kInterfaceCheck)
+ ? obj
+ : out;
+ __ Ldr(target, HeapOperand(obj.W(), class_offset));
+ GetAssembler()->MaybeUnpoisonHeapReference(target);
+
+ switch (instruction->GetTypeCheckKind()) {
+ case TypeCheckKind::kExactCheck: {
+ __ Cmp(out, cls);
+ __ Cset(out, eq);
+ if (zero.IsLinked()) {
+ __ B(&done);
+ }
+ break;
+ }
+ case TypeCheckKind::kAbstractClassCheck: {
+ // If the class is abstract, we eagerly fetch the super class of the
+ // object to avoid doing a comparison we know will fail.
+ vixl::Label loop, success;
+ __ Bind(&loop);
+ __ Ldr(out, HeapOperand(out, super_offset));
+ GetAssembler()->MaybeUnpoisonHeapReference(out);
+ // If `out` is null, we use it for the result, and jump to `done`.
+ __ Cbz(out, &done);
+ __ Cmp(out, cls);
+ __ B(ne, &loop);
+ __ Mov(out, 1);
+ if (zero.IsLinked()) {
+ __ B(&done);
+ }
+ break;
+ }
+ case TypeCheckKind::kClassHierarchyCheck: {
+ // Walk over the class hierarchy to find a match.
+ vixl::Label loop, success;
+ __ Bind(&loop);
+ __ Cmp(out, cls);
+ __ B(eq, &success);
+ __ Ldr(out, HeapOperand(out, super_offset));
+ GetAssembler()->MaybeUnpoisonHeapReference(out);
+ __ Cbnz(out, &loop);
+ // If `out` is null, we use it for the result, and jump to `done`.
+ __ B(&done);
+ __ Bind(&success);
+ __ Mov(out, 1);
+ if (zero.IsLinked()) {
+ __ B(&done);
+ }
+ break;
+ }
+ case TypeCheckKind::kArrayObjectCheck: {
+ // Just need to check that the object's class is a non primitive array.
+ __ Ldr(out, HeapOperand(out, component_offset));
+ GetAssembler()->MaybeUnpoisonHeapReference(out);
+ // If `out` is null, we use it for the result, and jump to `done`.
+ __ Cbz(out, &done);
+ __ Ldrh(out, HeapOperand(out, primitive_offset));
+ static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
+ __ Cbnz(out, &zero);
+ __ Mov(out, 1);
+ __ B(&done);
+ break;
+ }
+ case TypeCheckKind::kArrayCheck: {
+ __ Cmp(out, cls);
+ DCHECK(locations->OnlyCallsOnSlowPath());
+ slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(
+ instruction, /* is_fatal */ false);
+ codegen_->AddSlowPath(slow_path);
+ __ B(ne, slow_path->GetEntryLabel());
+ __ Mov(out, 1);
+ if (zero.IsLinked()) {
+ __ B(&done);
+ }
+ break;
+ }
+
+ case TypeCheckKind::kInterfaceCheck:
+ default: {
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr);
+ if (zero.IsLinked()) {
+ __ B(&done);
+ }
+ break;
+ }
+ }
+
+ if (zero.IsLinked()) {
+ __ Bind(&zero);
__ Mov(out, 0);
- __ Cbz(obj, &done);
}
- // Compare the class of `obj` with `cls`.
- __ Ldr(out, HeapOperand(obj, mirror::Object::ClassOffset()));
- GetAssembler()->MaybeUnpoisonHeapReference(out.W());
- __ Cmp(out, cls);
- if (instruction->IsClassFinal()) {
- // Classes must be equal for the instanceof to succeed.
- __ Cset(out, eq);
+ if (done.IsLinked()) {
+ __ Bind(&done);
+ }
+
+ if (slow_path != nullptr) {
+ __ Bind(slow_path->GetExitLabel());
+ }
+}
+
+void LocationsBuilderARM64::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
+ bool throws_into_catch = instruction->CanThrowIntoCatchBlock();
+
+ switch (instruction->GetTypeCheckKind()) {
+ case TypeCheckKind::kExactCheck:
+ case TypeCheckKind::kAbstractClassCheck:
+ case TypeCheckKind::kClassHierarchyCheck:
+ case TypeCheckKind::kArrayObjectCheck:
+ call_kind = throws_into_catch
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ break;
+ case TypeCheckKind::kInterfaceCheck:
+ call_kind = LocationSummary::kCall;
+ break;
+ case TypeCheckKind::kArrayCheck:
+ call_kind = LocationSummary::kCallOnSlowPath;
+ break;
+ }
+
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ instruction, call_kind);
+ if (call_kind != LocationSummary::kCall) {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ // Note that TypeCheckSlowPathARM64 uses this register too.
+ locations->AddTemp(Location::RequiresRegister());
} else {
- // If the classes are not equal, we go into a slow path.
- DCHECK(locations->OnlyCallsOnSlowPath());
- SlowPathCodeARM64* slow_path =
- new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(instruction);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(1)));
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = InputRegisterAt(instruction, 0);
+ Register cls = InputRegisterAt(instruction, 1);
+ Register temp;
+ if (!locations->WillCall()) {
+ temp = WRegisterFrom(instruction->GetLocations()->GetTemp(0));
+ }
+
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+ uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
+ uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
+ SlowPathCodeARM64* slow_path = nullptr;
+
+ if (!locations->WillCall()) {
+ slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(
+ instruction, !locations->CanCall());
codegen_->AddSlowPath(slow_path);
- __ B(ne, slow_path->GetEntryLabel());
- __ Mov(out, 1);
- __ Bind(slow_path->GetExitLabel());
}
+ vixl::Label done;
+ // Avoid null check if we know obj is not null.
+ if (instruction->MustDoNullCheck()) {
+ __ Cbz(obj, &done);
+ }
+
+ if (locations->WillCall()) {
+ __ Ldr(obj, HeapOperand(obj, class_offset));
+ GetAssembler()->MaybeUnpoisonHeapReference(obj);
+ } else {
+ __ Ldr(temp, HeapOperand(obj, class_offset));
+ GetAssembler()->MaybeUnpoisonHeapReference(temp);
+ }
+
+ switch (instruction->GetTypeCheckKind()) {
+ case TypeCheckKind::kExactCheck:
+ case TypeCheckKind::kArrayCheck: {
+ __ Cmp(temp, cls);
+ // Jump to slow path for throwing the exception or doing a
+ // more involved array check.
+ __ B(ne, slow_path->GetEntryLabel());
+ break;
+ }
+ case TypeCheckKind::kAbstractClassCheck: {
+ // If the class is abstract, we eagerly fetch the super class of the
+ // object to avoid doing a comparison we know will fail.
+ vixl::Label loop;
+ __ Bind(&loop);
+ __ Ldr(temp, HeapOperand(temp, super_offset));
+ GetAssembler()->MaybeUnpoisonHeapReference(temp);
+ // Jump to the slow path to throw the exception.
+ __ Cbz(temp, slow_path->GetEntryLabel());
+ __ Cmp(temp, cls);
+ __ B(ne, &loop);
+ break;
+ }
+ case TypeCheckKind::kClassHierarchyCheck: {
+ // Walk over the class hierarchy to find a match.
+ vixl::Label loop, success;
+ __ Bind(&loop);
+ __ Cmp(temp, cls);
+ __ B(eq, &success);
+ __ Ldr(temp, HeapOperand(temp, super_offset));
+ GetAssembler()->MaybeUnpoisonHeapReference(temp);
+ __ Cbnz(temp, &loop);
+ // Jump to the slow path to throw the exception.
+ __ B(slow_path->GetEntryLabel());
+ __ Bind(&success);
+ break;
+ }
+ case TypeCheckKind::kArrayObjectCheck: {
+ // Just need to check that the object's class is a non primitive array.
+ __ Ldr(temp, HeapOperand(temp, component_offset));
+ GetAssembler()->MaybeUnpoisonHeapReference(temp);
+ __ Cbz(temp, slow_path->GetEntryLabel());
+ __ Ldrh(temp, HeapOperand(temp, primitive_offset));
+ static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
+ __ Cbnz(temp, slow_path->GetEntryLabel());
+ break;
+ }
+ case TypeCheckKind::kInterfaceCheck:
+ default:
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr);
+ break;
+ }
__ Bind(&done);
+
+ if (slow_path != nullptr) {
+ __ Bind(slow_path->GetExitLabel());
+ }
}
void LocationsBuilderARM64::VisitIntConstant(HIntConstant* constant) {
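The kArrayObjectCheck path only has to establish that the object's class is an array of references: it loads the component type (component_offset) and then the primitive-type field (primitive_offset), relying on Primitive::kPrimNot being 0. A self-contained sketch of that condition, with field names that merely mirror the offsets read by the assembly above:

#include <cstdint>

// Sketch of the kArrayObjectCheck condition; `component` and `primitive_type`
// stand in for the fields read through component_offset and primitive_offset.
struct KlassDesc {
  KlassDesc* component;     // null when the class is not an array class
  uint16_t primitive_type;  // Primitive::kPrimNot (0) for reference types
};

bool IsObjectArrayClass(const KlassDesc* obj_class) {
  const KlassDesc* component = obj_class->component;
  return component != nullptr && component->primitive_type == 0;  // 0 == kPrimNot
}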
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index bf0d2e2a11..4722e42694 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -2281,7 +2281,7 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceFieldSet(HInstanceFieldSet* in
void LocationsBuilderMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary::CallKind call_kind =
- instruction->IsClassFinal() ? LocationSummary::kNoCall : LocationSummary::kCallOnSlowPath;
+ instruction->IsExactCheck() ? LocationSummary::kNoCall : LocationSummary::kCallOnSlowPath;
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
@@ -2305,7 +2305,7 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
// Compare the class of `obj` with `cls`.
__ LoadFromOffset(kLoadUnsignedWord, out, obj, mirror::Object::ClassOffset().Int32Value());
- if (instruction->IsClassFinal()) {
+ if (instruction->IsExactCheck()) {
// Classes must be equal for the instanceof to succeed.
__ Xor(out, out, cls);
__ Sltiu(out, out, 1);
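MIPS64 keeps the simple two-way split and only renames the predicate: the exact check still materializes the boolean branch-free with an Xor/Sltiu pair. A sketch of what those two instructions compute:

#include <cstdint>

// out = (obj_class == cls) ? 1 : 0, as emitted for IsExactCheck() above.
uint64_t ExactCheckResult(uint64_t obj_class_ptr, uint64_t cls_ptr) {
  uint64_t out = obj_class_ptr ^ cls_ptr;  // Xor(out, out, cls): zero iff equal
  return out < 1u ? 1u : 0u;               // Sltiu(out, out, 1): 1 iff out == 0
}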
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 09e939de47..3d03dd8146 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -287,7 +287,8 @@ class LoadClassSlowPathX86 : public SlowPathCode {
class TypeCheckSlowPathX86 : public SlowPathCode {
public:
- explicit TypeCheckSlowPathX86(HInstruction* instruction) : instruction_(instruction) {}
+ TypeCheckSlowPathX86(HInstruction* instruction, bool is_fatal)
+ : instruction_(instruction), is_fatal_(is_fatal) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
@@ -298,7 +299,19 @@ class TypeCheckSlowPathX86 : public SlowPathCode {
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
- SaveLiveRegisters(codegen, locations);
+
+ if (instruction_->IsCheckCast()) {
+ // The codegen for the instruction overwrites `temp`, so put it back in place.
+ Register obj = locations->InAt(0).AsRegister<Register>();
+ Register temp = locations->GetTemp(0).AsRegister<Register>();
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ __ movl(temp, Address(obj, class_offset));
+ __ MaybeUnpoisonHeapReference(temp);
+ }
+
+ if (!is_fatal_) {
+ SaveLiveRegisters(codegen, locations);
+ }
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
@@ -324,18 +337,22 @@ class TypeCheckSlowPathX86 : public SlowPathCode {
this);
}
- if (instruction_->IsInstanceOf()) {
- x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX));
- }
- RestoreLiveRegisters(codegen, locations);
+ if (!is_fatal_) {
+ if (instruction_->IsInstanceOf()) {
+ x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX));
+ }
+ RestoreLiveRegisters(codegen, locations);
- __ jmp(GetExitLabel());
+ __ jmp(GetExitLabel());
+ }
}
const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathX86"; }
+ bool IsFatal() const OVERRIDE { return is_fatal_; }
private:
HInstruction* const instruction_;
+ const bool is_fatal_;
DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathX86);
};
@@ -4956,14 +4973,33 @@ void InstructionCodeGeneratorX86::VisitThrow(HThrow* instruction) {
}
void LocationsBuilderX86::VisitInstanceOf(HInstanceOf* instruction) {
- LocationSummary::CallKind call_kind = instruction->IsClassFinal()
- ? LocationSummary::kNoCall
- : LocationSummary::kCallOnSlowPath;
+ LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
+ switch (instruction->GetTypeCheckKind()) {
+ case TypeCheckKind::kExactCheck:
+ case TypeCheckKind::kAbstractClassCheck:
+ case TypeCheckKind::kClassHierarchyCheck:
+ case TypeCheckKind::kArrayObjectCheck:
+ call_kind = LocationSummary::kNoCall;
+ break;
+ case TypeCheckKind::kInterfaceCheck:
+ call_kind = LocationSummary::kCall;
+ break;
+ case TypeCheckKind::kArrayCheck:
+ call_kind = LocationSummary::kCallOnSlowPath;
+ break;
+ }
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::Any());
- // Note that TypeCheckSlowPathX86 uses this register too.
- locations->SetOut(Location::RequiresRegister());
+ if (call_kind != LocationSummary::kCall) {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::Any());
+ // Note that TypeCheckSlowPathX86 uses this register too.
+ locations->SetOut(Location::RequiresRegister());
+ } else {
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetOut(Location::RegisterLocation(EAX));
+ }
}
void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
@@ -4972,8 +5008,11 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
Location cls = locations->InAt(1);
Register out = locations->Out().AsRegister<Register>();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
- NearLabel done, zero;
+ uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+ uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
+ uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
SlowPathCode* slow_path = nullptr;
+ NearLabel done, zero;
// Return 0 if `obj` is null.
// Avoid null check if we know obj is not null.
@@ -4981,78 +5020,282 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
__ testl(obj, obj);
__ j(kEqual, &zero);
}
- // Compare the class of `obj` with `cls`.
- __ movl(out, Address(obj, class_offset));
- __ MaybeUnpoisonHeapReference(out);
- if (cls.IsRegister()) {
- __ cmpl(out, cls.AsRegister<Register>());
- } else {
- DCHECK(cls.IsStackSlot()) << cls;
- __ cmpl(out, Address(ESP, cls.GetStackIndex()));
- }
- if (instruction->IsClassFinal()) {
- // Classes must be equal for the instanceof to succeed.
- __ j(kNotEqual, &zero);
- __ movl(out, Immediate(1));
- __ jmp(&done);
- } else {
- // If the classes are not equal, we go into a slow path.
- DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86(instruction);
- codegen_->AddSlowPath(slow_path);
- __ j(kNotEqual, slow_path->GetEntryLabel());
- __ movl(out, Immediate(1));
- __ jmp(&done);
+ // In case of an interface check, we put the object class into the object register.
+ // This is safe, as the register is caller-save, and the object must be in another
+ // register if it survives the runtime call.
+ Register target = (instruction->GetTypeCheckKind() == TypeCheckKind::kInterfaceCheck)
+ ? obj
+ : out;
+ __ movl(target, Address(obj, class_offset));
+ __ MaybeUnpoisonHeapReference(target);
+
+ switch (instruction->GetTypeCheckKind()) {
+ case TypeCheckKind::kExactCheck: {
+ if (cls.IsRegister()) {
+ __ cmpl(out, cls.AsRegister<Register>());
+ } else {
+ DCHECK(cls.IsStackSlot()) << cls;
+ __ cmpl(out, Address(ESP, cls.GetStackIndex()));
+ }
+ // Classes must be equal for the instanceof to succeed.
+ __ j(kNotEqual, &zero);
+ __ movl(out, Immediate(1));
+ __ jmp(&done);
+ break;
+ }
+ case TypeCheckKind::kAbstractClassCheck: {
+ // If the class is abstract, we eagerly fetch the super class of the
+ // object to avoid doing a comparison we know will fail.
+ NearLabel loop;
+ __ Bind(&loop);
+ __ movl(out, Address(out, super_offset));
+ __ MaybeUnpoisonHeapReference(out);
+ __ testl(out, out);
+ // If `out` is null, we use it for the result, and jump to `done`.
+ __ j(kEqual, &done);
+ if (cls.IsRegister()) {
+ __ cmpl(out, cls.AsRegister<Register>());
+ } else {
+ DCHECK(cls.IsStackSlot()) << cls;
+ __ cmpl(out, Address(ESP, cls.GetStackIndex()));
+ }
+ __ j(kNotEqual, &loop);
+ __ movl(out, Immediate(1));
+ if (zero.IsLinked()) {
+ __ jmp(&done);
+ }
+ break;
+ }
+ case TypeCheckKind::kClassHierarchyCheck: {
+ // Walk over the class hierarchy to find a match.
+ NearLabel loop, success;
+ __ Bind(&loop);
+ if (cls.IsRegister()) {
+ __ cmpl(out, cls.AsRegister<Register>());
+ } else {
+ DCHECK(cls.IsStackSlot()) << cls;
+ __ cmpl(out, Address(ESP, cls.GetStackIndex()));
+ }
+ __ j(kEqual, &success);
+ __ movl(out, Address(out, super_offset));
+ __ MaybeUnpoisonHeapReference(out);
+ __ testl(out, out);
+ __ j(kNotEqual, &loop);
+ // If `out` is null, we use it for the result, and jump to `done`.
+ __ jmp(&done);
+ __ Bind(&success);
+ __ movl(out, Immediate(1));
+ if (zero.IsLinked()) {
+ __ jmp(&done);
+ }
+ break;
+ }
+ case TypeCheckKind::kArrayObjectCheck: {
+ // Just need to check that the object's class is a non primitive array.
+ __ movl(out, Address(out, component_offset));
+ __ MaybeUnpoisonHeapReference(out);
+ __ testl(out, out);
+ // If `out` is null, we use it for the result, and jump to `done`.
+ __ j(kEqual, &done);
+ __ cmpw(Address(out, primitive_offset), Immediate(Primitive::kPrimNot));
+ __ j(kNotEqual, &zero);
+ __ movl(out, Immediate(1));
+ __ jmp(&done);
+ break;
+ }
+ case TypeCheckKind::kArrayCheck: {
+ if (cls.IsRegister()) {
+ __ cmpl(out, cls.AsRegister<Register>());
+ } else {
+ DCHECK(cls.IsStackSlot()) << cls;
+ __ cmpl(out, Address(ESP, cls.GetStackIndex()));
+ }
+ DCHECK(locations->OnlyCallsOnSlowPath());
+ slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86(
+ instruction, /* is_fatal */ false);
+ codegen_->AddSlowPath(slow_path);
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ movl(out, Immediate(1));
+ if (zero.IsLinked()) {
+ __ jmp(&done);
+ }
+ break;
+ }
+
+ case TypeCheckKind::kInterfaceCheck:
+ default: {
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr);
+ if (zero.IsLinked()) {
+ __ jmp(&done);
+ }
+ break;
+ }
}
- if (instruction->MustDoNullCheck() || instruction->IsClassFinal()) {
+ if (zero.IsLinked()) {
__ Bind(&zero);
- __ movl(out, Immediate(0));
+ __ xorl(out, out);
+ }
+
+ if (done.IsLinked()) {
+ __ Bind(&done);
}
if (slow_path != nullptr) {
__ Bind(slow_path->GetExitLabel());
}
- __ Bind(&done);
}
void LocationsBuilderX86::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
+ bool throws_into_catch = instruction->CanThrowIntoCatchBlock();
+
+ switch (instruction->GetTypeCheckKind()) {
+ case TypeCheckKind::kExactCheck:
+ case TypeCheckKind::kAbstractClassCheck:
+ case TypeCheckKind::kClassHierarchyCheck:
+ case TypeCheckKind::kArrayObjectCheck:
+ call_kind = throws_into_catch
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ break;
+ case TypeCheckKind::kInterfaceCheck:
+ call_kind = LocationSummary::kCall;
+ break;
+ case TypeCheckKind::kArrayCheck:
+ call_kind = LocationSummary::kCallOnSlowPath;
+ break;
+ }
+
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
- instruction, LocationSummary::kCallOnSlowPath);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::Any());
- // Note that TypeCheckSlowPathX86 uses this register too.
- locations->AddTemp(Location::RequiresRegister());
+ instruction, call_kind);
+ if (call_kind != LocationSummary::kCall) {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::Any());
+ // Note that TypeCheckSlowPathX86 uses this register too.
+ locations->AddTemp(Location::RequiresRegister());
+ } else {
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ }
}
void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) {
LocationSummary* locations = instruction->GetLocations();
Register obj = locations->InAt(0).AsRegister<Register>();
Location cls = locations->InAt(1);
- Register temp = locations->GetTemp(0).AsRegister<Register>();
+ Register temp = locations->WillCall()
+ ? kNoRegister
+ : locations->GetTemp(0).AsRegister<Register>();
+
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86(instruction);
- codegen_->AddSlowPath(slow_path);
+ uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+ uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
+ uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
+ SlowPathCode* slow_path = nullptr;
+ if (!locations->WillCall()) {
+ slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86(
+ instruction, !locations->CanCall());
+ codegen_->AddSlowPath(slow_path);
+ }
+
+ NearLabel done, abstract_entry;
// Avoid null check if we know obj is not null.
if (instruction->MustDoNullCheck()) {
__ testl(obj, obj);
- __ j(kEqual, slow_path->GetExitLabel());
+ __ j(kEqual, &done);
}
- // Compare the class of `obj` with `cls`.
- __ movl(temp, Address(obj, class_offset));
- __ MaybeUnpoisonHeapReference(temp);
- if (cls.IsRegister()) {
- __ cmpl(temp, cls.AsRegister<Register>());
+
+ if (locations->WillCall()) {
+ __ movl(obj, Address(obj, class_offset));
+ __ MaybeUnpoisonHeapReference(obj);
} else {
- DCHECK(cls.IsStackSlot()) << cls;
- __ cmpl(temp, Address(ESP, cls.GetStackIndex()));
+ __ movl(temp, Address(obj, class_offset));
+ __ MaybeUnpoisonHeapReference(temp);
+ }
+
+ switch (instruction->GetTypeCheckKind()) {
+ case TypeCheckKind::kExactCheck:
+ case TypeCheckKind::kArrayCheck: {
+ if (cls.IsRegister()) {
+ __ cmpl(temp, cls.AsRegister<Register>());
+ } else {
+ DCHECK(cls.IsStackSlot()) << cls;
+ __ cmpl(temp, Address(ESP, cls.GetStackIndex()));
+ }
+ // Jump to slow path for throwing the exception or doing a
+ // more involved array check.
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ break;
+ }
+ case TypeCheckKind::kAbstractClassCheck: {
+ // If the class is abstract, we eagerly fetch the super class of the
+ // object to avoid doing a comparison we know will fail.
+ NearLabel loop, success;
+ __ Bind(&loop);
+ __ movl(temp, Address(temp, super_offset));
+ __ MaybeUnpoisonHeapReference(temp);
+ __ testl(temp, temp);
+ // Jump to the slow path to throw the exception.
+ __ j(kEqual, slow_path->GetEntryLabel());
+ if (cls.IsRegister()) {
+ __ cmpl(temp, cls.AsRegister<Register>());
+ } else {
+ DCHECK(cls.IsStackSlot()) << cls;
+ __ cmpl(temp, Address(ESP, cls.GetStackIndex()));
+ }
+ __ j(kNotEqual, &loop);
+ break;
+ }
+ case TypeCheckKind::kClassHierarchyCheck: {
+ // Walk over the class hierarchy to find a match.
+ NearLabel loop, success;
+ __ Bind(&loop);
+ if (cls.IsRegister()) {
+ __ cmpl(temp, cls.AsRegister<Register>());
+ } else {
+ DCHECK(cls.IsStackSlot()) << cls;
+ __ cmpl(temp, Address(ESP, cls.GetStackIndex()));
+ }
+ __ j(kEqual, &success);
+ __ movl(temp, Address(temp, super_offset));
+ __ MaybeUnpoisonHeapReference(temp);
+ __ testl(temp, temp);
+ __ j(kNotEqual, &loop);
+ // Jump to the slow path to throw the exception.
+ __ jmp(slow_path->GetEntryLabel());
+ __ Bind(&success);
+ break;
+ }
+ case TypeCheckKind::kArrayObjectCheck: {
+ // Just need to check that the object's class is a non primitive array.
+ __ movl(temp, Address(temp, component_offset));
+ __ MaybeUnpoisonHeapReference(temp);
+ __ testl(temp, temp);
+ __ j(kEqual, slow_path->GetEntryLabel());
+ __ cmpw(Address(temp, primitive_offset), Immediate(Primitive::kPrimNot));
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ break;
+ }
+ case TypeCheckKind::kInterfaceCheck:
+ default:
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr);
+ break;
+ }
+ __ Bind(&done);
+
+ if (slow_path != nullptr) {
+ __ Bind(slow_path->GetExitLabel());
}
- // The checkcast succeeds if the classes are equal (fast path).
- // Otherwise, we need to go into the slow path to check the types.
- __ j(kNotEqual, slow_path->GetEntryLabel());
- __ Bind(slow_path->GetExitLabel());
}
void LocationsBuilderX86::VisitMonitorOperation(HMonitorOperation* instruction) {
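Across the back-ends the new slow paths take an is_fatal flag: a CheckCast slow path that can never be resumed (the exception cannot be caught in this method and the inline code never falls through past it) skips SaveLiveRegisters/RestoreLiveRegisters and the jump back to the exit label. The flag is derived as !locations->CanCall(), i.e. it is true exactly when the LocationSummary ended up as kNoCall. A condensed, self-contained restatement of that policy for the kinds that build a slow path (this helper does not exist in the tree; it just mirrors CanThrowIntoCatchBlock() and the chosen CallKind):

// Sketch: when is a type-check slow path "fatal"?
bool TypeCheckSlowPathIsFatal(bool is_check_cast, bool throws_into_catch, bool is_array_check) {
  // InstanceOf slow paths always produce a value and return, so they are never fatal.
  // A CheckCast slow path must be resumable if the exception may be caught here
  // (throws_into_catch) or if the inline kArrayCheck code can fall through and continue.
  return is_check_cast && !throws_into_catch && !is_array_check;
}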
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 289ef641f0..32a1db5475 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -300,8 +300,8 @@ class LoadStringSlowPathX86_64 : public SlowPathCode {
class TypeCheckSlowPathX86_64 : public SlowPathCode {
public:
- explicit TypeCheckSlowPathX86_64(HInstruction* instruction)
- : instruction_(instruction) {}
+ TypeCheckSlowPathX86_64(HInstruction* instruction, bool is_fatal)
+ : instruction_(instruction), is_fatal_(is_fatal) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
@@ -313,7 +313,19 @@ class TypeCheckSlowPathX86_64 : public SlowPathCode {
CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
- SaveLiveRegisters(codegen, locations);
+
+ if (instruction_->IsCheckCast()) {
+ // The codegen for the instruction overwrites `temp`, so put it back in place.
+ CpuRegister obj = locations->InAt(0).AsRegister<CpuRegister>();
+ CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>();
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ __ movl(temp, Address(obj, class_offset));
+ __ MaybeUnpoisonHeapReference(temp);
+ }
+
+ if (!is_fatal_) {
+ SaveLiveRegisters(codegen, locations);
+ }
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
@@ -339,18 +351,23 @@ class TypeCheckSlowPathX86_64 : public SlowPathCode {
this);
}
- if (instruction_->IsInstanceOf()) {
- x64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX));
- }
+ if (!is_fatal_) {
+ if (instruction_->IsInstanceOf()) {
+ x64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX));
+ }
- RestoreLiveRegisters(codegen, locations);
- __ jmp(GetExitLabel());
+ RestoreLiveRegisters(codegen, locations);
+ __ jmp(GetExitLabel());
+ }
}
const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathX86_64"; }
+ bool IsFatal() const OVERRIDE { return is_fatal_; }
+
private:
HInstruction* const instruction_;
+ const bool is_fatal_;
DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathX86_64);
};
@@ -4684,14 +4701,33 @@ void InstructionCodeGeneratorX86_64::VisitThrow(HThrow* instruction) {
}
void LocationsBuilderX86_64::VisitInstanceOf(HInstanceOf* instruction) {
- LocationSummary::CallKind call_kind = instruction->IsClassFinal()
- ? LocationSummary::kNoCall
- : LocationSummary::kCallOnSlowPath;
+ LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
+ switch (instruction->GetTypeCheckKind()) {
+ case TypeCheckKind::kExactCheck:
+ case TypeCheckKind::kAbstractClassCheck:
+ case TypeCheckKind::kClassHierarchyCheck:
+ case TypeCheckKind::kArrayObjectCheck:
+ call_kind = LocationSummary::kNoCall;
+ break;
+ case TypeCheckKind::kInterfaceCheck:
+ call_kind = LocationSummary::kCall;
+ break;
+ case TypeCheckKind::kArrayCheck:
+ call_kind = LocationSummary::kCallOnSlowPath;
+ break;
+ }
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::Any());
- // Note that TypeCheckSlowPathX86_64 uses this register too.
- locations->SetOut(Location::RequiresRegister());
+ if (call_kind != LocationSummary::kCall) {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::Any());
+ // Note that TypeCheckSlowPathX86_64 uses this register too.
+ locations->SetOut(Location::RequiresRegister());
+ } else {
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetOut(Location::RegisterLocation(RAX));
+ }
}
void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
@@ -4700,8 +4736,11 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
Location cls = locations->InAt(1);
CpuRegister out = locations->Out().AsRegister<CpuRegister>();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
- NearLabel done, zero;
+ uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+ uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
+ uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
SlowPathCode* slow_path = nullptr;
+ NearLabel done, zero;
// Return 0 if `obj` is null.
// Avoid null check if we know obj is not null.
@@ -4709,77 +4748,282 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
__ testl(obj, obj);
__ j(kEqual, &zero);
}
- // Compare the class of `obj` with `cls`.
- __ movl(out, Address(obj, class_offset));
- __ MaybeUnpoisonHeapReference(out);
- if (cls.IsRegister()) {
- __ cmpl(out, cls.AsRegister<CpuRegister>());
- } else {
- DCHECK(cls.IsStackSlot()) << cls;
- __ cmpl(out, Address(CpuRegister(RSP), cls.GetStackIndex()));
- }
- if (instruction->IsClassFinal()) {
- // Classes must be equal for the instanceof to succeed.
- __ j(kNotEqual, &zero);
- __ movl(out, Immediate(1));
- __ jmp(&done);
- } else {
- // If the classes are not equal, we go into a slow path.
- DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(instruction);
- codegen_->AddSlowPath(slow_path);
- __ j(kNotEqual, slow_path->GetEntryLabel());
- __ movl(out, Immediate(1));
- __ jmp(&done);
+
+ // In case of an interface check, we put the object class into the object register.
+ // This is safe, as the register is caller-save, and the object must be in another
+ // register if it survives the runtime call.
+ CpuRegister target = (instruction->GetTypeCheckKind() == TypeCheckKind::kInterfaceCheck)
+ ? obj
+ : out;
+ __ movl(target, Address(obj, class_offset));
+ __ MaybeUnpoisonHeapReference(target);
+
+ switch (instruction->GetTypeCheckKind()) {
+ case TypeCheckKind::kExactCheck: {
+ if (cls.IsRegister()) {
+ __ cmpl(out, cls.AsRegister<CpuRegister>());
+ } else {
+ DCHECK(cls.IsStackSlot()) << cls;
+ __ cmpl(out, Address(CpuRegister(RSP), cls.GetStackIndex()));
+ }
+ // Classes must be equal for the instanceof to succeed.
+ __ j(kNotEqual, &zero);
+ __ movl(out, Immediate(1));
+ __ jmp(&done);
+ break;
+ }
+ case TypeCheckKind::kAbstractClassCheck: {
+ // If the class is abstract, we eagerly fetch the super class of the
+ // object to avoid doing a comparison we know will fail.
+ NearLabel loop, success;
+ __ Bind(&loop);
+ __ movl(out, Address(out, super_offset));
+ __ MaybeUnpoisonHeapReference(out);
+ __ testl(out, out);
+ // If `out` is null, we use it for the result, and jump to `done`.
+ __ j(kEqual, &done);
+ if (cls.IsRegister()) {
+ __ cmpl(out, cls.AsRegister<CpuRegister>());
+ } else {
+ DCHECK(cls.IsStackSlot()) << cls;
+ __ cmpl(out, Address(CpuRegister(RSP), cls.GetStackIndex()));
+ }
+ __ j(kNotEqual, &loop);
+ __ movl(out, Immediate(1));
+ if (zero.IsLinked()) {
+ __ jmp(&done);
+ }
+ break;
+ }
+ case TypeCheckKind::kClassHierarchyCheck: {
+ // Walk over the class hierarchy to find a match.
+ NearLabel loop, success;
+ __ Bind(&loop);
+ if (cls.IsRegister()) {
+ __ cmpl(out, cls.AsRegister<CpuRegister>());
+ } else {
+ DCHECK(cls.IsStackSlot()) << cls;
+ __ cmpl(out, Address(CpuRegister(RSP), cls.GetStackIndex()));
+ }
+ __ j(kEqual, &success);
+ __ movl(out, Address(out, super_offset));
+ __ MaybeUnpoisonHeapReference(out);
+ __ testl(out, out);
+ __ j(kNotEqual, &loop);
+ // If `out` is null, we use it for the result, and jump to `done`.
+ __ jmp(&done);
+ __ Bind(&success);
+ __ movl(out, Immediate(1));
+ if (zero.IsLinked()) {
+ __ jmp(&done);
+ }
+ break;
+ }
+ case TypeCheckKind::kArrayObjectCheck: {
+ // Just need to check that the object's class is a non-primitive array.
+ __ movl(out, Address(out, component_offset));
+ __ MaybeUnpoisonHeapReference(out);
+ __ testl(out, out);
+ // If `out` is null, we use it for the result, and jump to `done`.
+ __ j(kEqual, &done);
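+ // A reference array has a component type whose primitive type is kPrimNot;
+ // any other value means a primitive array, which fails the check.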
+ __ cmpw(Address(out, primitive_offset), Immediate(Primitive::kPrimNot));
+ __ j(kNotEqual, &zero);
+ __ movl(out, Immediate(1));
+ __ jmp(&done);
+ break;
+ }
+ case TypeCheckKind::kArrayCheck: {
+ if (cls.IsRegister()) {
+ __ cmpl(out, cls.AsRegister<CpuRegister>());
+ } else {
+ DCHECK(cls.IsStackSlot()) << cls;
+ __ cmpl(out, Address(CpuRegister(RSP), cls.GetStackIndex()));
+ }
+ DCHECK(locations->OnlyCallsOnSlowPath());
+ slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(
+ instruction, /* is_fatal */ false);
+ codegen_->AddSlowPath(slow_path);
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ movl(out, Immediate(1));
+ if (zero.IsLinked()) {
+ __ jmp(&done);
+ }
+ break;
+ }
+
+ case TypeCheckKind::kInterfaceCheck:
+ default: {
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr);
+ if (zero.IsLinked()) {
+ __ jmp(&done);
+ }
+ break;
+ }
}
- if (instruction->MustDoNullCheck() || instruction->IsClassFinal()) {
+ if (zero.IsLinked()) {
__ Bind(&zero);
- __ movl(out, Immediate(0));
+ __ xorl(out, out);
+ }
+
+ if (done.IsLinked()) {
+ __ Bind(&done);
}
if (slow_path != nullptr) {
__ Bind(slow_path->GetExitLabel());
}
- __ Bind(&done);
}
void LocationsBuilderX86_64::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
+ bool throws_into_catch = instruction->CanThrowIntoCatchBlock();
+
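+ // A cast that can throw into a catch handler of this method needs a genuine
+ // slow path (it cannot be treated as fatal), hence kCallOnSlowPath below.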
+ switch (instruction->GetTypeCheckKind()) {
+ case TypeCheckKind::kExactCheck:
+ case TypeCheckKind::kAbstractClassCheck:
+ case TypeCheckKind::kClassHierarchyCheck:
+ case TypeCheckKind::kArrayObjectCheck:
+ call_kind = throws_into_catch
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ break;
+ case TypeCheckKind::kInterfaceCheck:
+ call_kind = LocationSummary::kCall;
+ break;
+ case TypeCheckKind::kArrayCheck:
+ call_kind = LocationSummary::kCallOnSlowPath;
+ break;
+ }
+
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
- instruction, LocationSummary::kCallOnSlowPath);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::Any());
- // Note that TypeCheckSlowPathX86_64 uses this register too.
- locations->AddTemp(Location::RequiresRegister());
+ instruction, call_kind);
+ if (call_kind != LocationSummary::kCall) {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::Any());
+ // Note that TypeCheckSlowPathX86_64 uses this register too.
+ locations->AddTemp(Location::RequiresRegister());
+ } else {
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ }
}
void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) {
LocationSummary* locations = instruction->GetLocations();
CpuRegister obj = locations->InAt(0).AsRegister<CpuRegister>();
Location cls = locations->InAt(1);
- CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>();
+ CpuRegister temp = locations->WillCall()
+ ? CpuRegister(kNoRegister)
+ : locations->GetTemp(0).AsRegister<CpuRegister>();
+
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(instruction);
- codegen_->AddSlowPath(slow_path);
+ uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+ uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
+ uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
+ SlowPathCode* slow_path = nullptr;
+
+ if (!locations->WillCall()) {
+ slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(
+ instruction, !locations->CanCall());
+ codegen_->AddSlowPath(slow_path);
+ }
+ NearLabel done;
// Avoid null check if we know obj is not null.
if (instruction->MustDoNullCheck()) {
__ testl(obj, obj);
- __ j(kEqual, slow_path->GetExitLabel());
+ __ j(kEqual, &done);
}
- // Compare the class of `obj` with `cls`.
- __ movl(temp, Address(obj, class_offset));
- __ MaybeUnpoisonHeapReference(temp);
- if (cls.IsRegister()) {
- __ cmpl(temp, cls.AsRegister<CpuRegister>());
+
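+ // The object's class replaces the object in its (caller-save) argument
+ // register before the runtime call, as in the instanceof case above.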
+ if (locations->WillCall()) {
+ __ movl(obj, Address(obj, class_offset));
+ __ MaybeUnpoisonHeapReference(obj);
} else {
- DCHECK(cls.IsStackSlot()) << cls;
- __ cmpl(temp, Address(CpuRegister(RSP), cls.GetStackIndex()));
+ __ movl(temp, Address(obj, class_offset));
+ __ MaybeUnpoisonHeapReference(temp);
+ }
+
+ switch (instruction->GetTypeCheckKind()) {
+ case TypeCheckKind::kExactCheck:
+ case TypeCheckKind::kArrayCheck: {
+ if (cls.IsRegister()) {
+ __ cmpl(temp, cls.AsRegister<CpuRegister>());
+ } else {
+ DCHECK(cls.IsStackSlot()) << cls;
+ __ cmpl(temp, Address(CpuRegister(RSP), cls.GetStackIndex()));
+ }
+ // Jump to slow path for throwing the exception or doing a
+ // more involved array check.
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ break;
+ }
+ case TypeCheckKind::kAbstractClassCheck: {
+ // If the class is abstract, we eagerly fetch the super class of the
+ // object to avoid doing a comparison we know will fail.
+ NearLabel loop;
+ __ Bind(&loop);
+ __ movl(temp, Address(temp, super_offset));
+ __ MaybeUnpoisonHeapReference(temp);
+ __ testl(temp, temp);
+ // Jump to the slow path to throw the exception.
+ __ j(kEqual, slow_path->GetEntryLabel());
+ if (cls.IsRegister()) {
+ __ cmpl(temp, cls.AsRegister<CpuRegister>());
+ } else {
+ DCHECK(cls.IsStackSlot()) << cls;
+ __ cmpl(temp, Address(CpuRegister(RSP), cls.GetStackIndex()));
+ }
+ __ j(kNotEqual, &loop);
+ break;
+ }
+ case TypeCheckKind::kClassHierarchyCheck: {
+ // Walk over the class hierarchy to find a match.
+ NearLabel loop, success;
+ __ Bind(&loop);
+ if (cls.IsRegister()) {
+ __ cmpl(temp, cls.AsRegister<CpuRegister>());
+ } else {
+ DCHECK(cls.IsStackSlot()) << cls;
+ __ cmpl(temp, Address(CpuRegister(RSP), cls.GetStackIndex()));
+ }
+ __ j(kEqual, &success);
+ __ movl(temp, Address(temp, super_offset));
+ __ MaybeUnpoisonHeapReference(temp);
+ __ testl(temp, temp);
+ __ j(kNotEqual, &loop);
+ // Jump to the slow path to throw the exception.
+ __ jmp(slow_path->GetEntryLabel());
+ __ Bind(&success);
+ break;
+ }
+ case TypeCheckKind::kArrayObjectCheck: {
+ // Just need to check that the object's class is a non-primitive array.
+ __ movl(temp, Address(temp, component_offset));
+ __ MaybeUnpoisonHeapReference(temp);
+ __ testl(temp, temp);
+ __ j(kEqual, slow_path->GetEntryLabel());
+ __ cmpw(Address(temp, primitive_offset), Immediate(Primitive::kPrimNot));
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ break;
+ }
+ case TypeCheckKind::kInterfaceCheck:
+ default:
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr);
+ break;
+ }
+ __ Bind(&done);
+
+ if (slow_path != nullptr) {
+ __ Bind(slow_path->GetExitLabel());
}
- // The checkcast succeeds if the classes are equal (fast path).
- // Otherwise, we need to go into the slow path to check the types.
- __ j(kNotEqual, slow_path->GetEntryLabel());
- __ Bind(slow_path->GetExitLabel());
}
void LocationsBuilderX86_64::VisitMonitorOperation(HMonitorOperation* instruction) {
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index af8aa23de5..583da30438 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -624,8 +624,31 @@ void SSAChecker::VisitPhi(HPhi* phi) {
}
if (phi->IsCatchPhi()) {
- // The number of inputs of a catch phi corresponds to the total number of
- // throwing instructions caught by this catch block.
+ // The number of inputs of a catch phi should be the total number of throwing
+ // instructions caught by this catch block. We do not enforce this, however,
+ // because we do not remove the corresponding inputs when we prove that an
+ // instruction cannot throw. Instead, we at least test that all phis have the
+ // same, non-zero number of inputs (b/24054676).
+ size_t input_count_this = phi->InputCount();
+ if (input_count_this == 0u) {
+ AddError(StringPrintf("Phi %d in catch block %d has zero inputs.",
+ phi->GetId(),
+ phi->GetBlock()->GetBlockId()));
+ } else {
+ HInstruction* next_phi = phi->GetNext();
+ if (next_phi != nullptr) {
+ size_t input_count_next = next_phi->InputCount();
+ if (input_count_this != input_count_next) {
+ AddError(StringPrintf("Phi %d in catch block %d has %zu inputs, "
+ "but phi %d has %zu inputs.",
+ phi->GetId(),
+ phi->GetBlock()->GetBlockId(),
+ input_count_this,
+ next_phi->GetId(),
+ input_count_next));
+ }
+ }
+ }
} else {
// Ensure the number of inputs of a non-catch phi is the same as the number
// of its predecessors.
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 5becf0fb69..318d3a6ee8 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -1328,6 +1328,83 @@ void IntrinsicCodeGeneratorX86::VisitStringNewStringFromString(HInvoke* invoke)
__ Bind(slow_path->GetExitLabel());
}
+void IntrinsicLocationsBuilderX86::VisitStringGetCharsNoCheck(HInvoke* invoke) {
+ // public void getChars(int srcBegin, int srcEnd, char[] dst, int dstBegin);
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
+ // Place srcEnd in ECX to save a move below.
+ locations->SetInAt(2, Location::RegisterLocation(ECX));
+ locations->SetInAt(3, Location::RequiresRegister());
+ locations->SetInAt(4, Location::RequiresRegister());
+
+ // And we need some temporaries. We will use REP MOVSW, so we need fixed registers.
+ // We don't have enough registers to also grab ECX, so handle below.
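+ // (ECX is already taken by srcEnd, input 2; the code generator saves and
+ // restores it around the REP MOVSW.)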
+ locations->AddTemp(Location::RegisterLocation(ESI));
+ locations->AddTemp(Location::RegisterLocation(EDI));
+}
+
+void IntrinsicCodeGeneratorX86::VisitStringGetCharsNoCheck(HInvoke* invoke) {
+ X86Assembler* assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ size_t char_component_size = Primitive::ComponentSize(Primitive::kPrimChar);
+ // Location of data in char array buffer.
+ const uint32_t data_offset = mirror::Array::DataOffset(char_component_size).Uint32Value();
+ // Location of char array data in string.
+ const uint32_t value_offset = mirror::String::ValueOffset().Uint32Value();
+
+ // public void getChars(int srcBegin, int srcEnd, char[] dst, int dstBegin);
+ Register obj = locations->InAt(0).AsRegister<Register>();
+ Location srcBegin = locations->InAt(1);
+ int srcBegin_value =
+ srcBegin.IsConstant() ? srcBegin.GetConstant()->AsIntConstant()->GetValue() : 0;
+ Register srcEnd = locations->InAt(2).AsRegister<Register>();
+ Register dst = locations->InAt(3).AsRegister<Register>();
+ Register dstBegin = locations->InAt(4).AsRegister<Register>();
+
+ // Check assumption that sizeof(Char) is 2 (used in scaling below).
+ const size_t char_size = Primitive::ComponentSize(Primitive::kPrimChar);
+ DCHECK_EQ(char_size, 2u);
+
+ // Compute the address of the destination buffer.
+ __ leal(EDI, Address(dst, dstBegin, ScaleFactor::TIMES_2, data_offset));
+
+ // Compute the address of the source string.
+ if (srcBegin.IsConstant()) {
+ // Compute the address of the source string by adding the number of chars from
+ // the source beginning to the value offset of a string.
+ __ leal(ESI, Address(obj, srcBegin_value * char_size + value_offset));
+ } else {
+ __ leal(ESI, Address(obj, srcBegin.AsRegister<Register>(),
+ ScaleFactor::TIMES_2, value_offset));
+ }
+
+ // Compute the number of chars (words) to move.
+ // Now is the time to save ECX, since we don't know if it will be used later.
+ __ pushl(ECX);
+ int stack_adjust = kX86WordSize;
+ __ cfi().AdjustCFAOffset(stack_adjust);
+ DCHECK_EQ(srcEnd, ECX);
+ if (srcBegin.IsConstant()) {
+ if (srcBegin_value != 0) {
+ __ subl(ECX, Immediate(srcBegin_value));
+ }
+ } else {
+ DCHECK(srcBegin.IsRegister());
+ __ subl(ECX, srcBegin.AsRegister<Register>());
+ }
+
+ // Do the move.
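+ // REP MOVSW copies ECX 16-bit units from [ESI] to [EDI], which is why those
+ // registers are fixed above.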
+ __ rep_movsw();
+
+ // And restore ECX.
+ __ popl(ECX);
+ __ cfi().AdjustCFAOffset(-stack_adjust);
+}
+
static void GenPeek(LocationSummary* locations, Primitive::Type size, X86Assembler* assembler) {
Register address = locations->InAt(0).AsRegisterPairLow<Register>();
Location out_loc = locations->Out();
@@ -2170,7 +2247,6 @@ void IntrinsicCodeGeneratorX86::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED)
}
UNIMPLEMENTED_INTRINSIC(MathRoundDouble)
-UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(LongRotateRight)
UNIMPLEMENTED_INTRINSIC(LongRotateLeft)
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 2bd86a1ac3..1a13b699c8 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -1232,6 +1232,74 @@ void IntrinsicCodeGeneratorX86_64::VisitStringNewStringFromString(HInvoke* invok
__ Bind(slow_path->GetExitLabel());
}
+void IntrinsicLocationsBuilderX86_64::VisitStringGetCharsNoCheck(HInvoke* invoke) {
+ // public void getChars(int srcBegin, int srcEnd, char[] dst, int dstBegin);
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
+ locations->SetInAt(2, Location::RequiresRegister());
+ locations->SetInAt(3, Location::RequiresRegister());
+ locations->SetInAt(4, Location::RequiresRegister());
+
+ // And we need some temporaries. We will use REP MOVSW, so we need fixed registers.
+ locations->AddTemp(Location::RegisterLocation(RSI));
+ locations->AddTemp(Location::RegisterLocation(RDI));
+ locations->AddTemp(Location::RegisterLocation(RCX));
+}
+
+void IntrinsicCodeGeneratorX86_64::VisitStringGetCharsNoCheck(HInvoke* invoke) {
+ X86_64Assembler* assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ size_t char_component_size = Primitive::ComponentSize(Primitive::kPrimChar);
+ // Location of data in char array buffer.
+ const uint32_t data_offset = mirror::Array::DataOffset(char_component_size).Uint32Value();
+ // Location of char array data in string.
+ const uint32_t value_offset = mirror::String::ValueOffset().Uint32Value();
+
+ // public void getChars(int srcBegin, int srcEnd, char[] dst, int dstBegin);
+ CpuRegister obj = locations->InAt(0).AsRegister<CpuRegister>();
+ Location srcBegin = locations->InAt(1);
+ int srcBegin_value =
+ srcBegin.IsConstant() ? srcBegin.GetConstant()->AsIntConstant()->GetValue() : 0;
+ CpuRegister srcEnd = locations->InAt(2).AsRegister<CpuRegister>();
+ CpuRegister dst = locations->InAt(3).AsRegister<CpuRegister>();
+ CpuRegister dstBegin = locations->InAt(4).AsRegister<CpuRegister>();
+
+ // Check assumption that sizeof(Char) is 2 (used in scaling below).
+ const size_t char_size = Primitive::ComponentSize(Primitive::kPrimChar);
+ DCHECK_EQ(char_size, 2u);
+
+ // Compute the address of the destination buffer.
+ __ leaq(CpuRegister(RDI), Address(dst, dstBegin, ScaleFactor::TIMES_2, data_offset));
+
+ // Compute the address of the source string.
+ if (srcBegin.IsConstant()) {
+ // Compute the address of the source string by adding the number of chars from
+ // the source beginning to the value offset of a string.
+ __ leaq(CpuRegister(RSI), Address(obj, srcBegin_value * char_size + value_offset));
+ } else {
+ __ leaq(CpuRegister(RSI), Address(obj, srcBegin.AsRegister<CpuRegister>(),
+ ScaleFactor::TIMES_2, value_offset));
+ }
+
+ // Compute the number of chars (words) to move.
+ __ movl(CpuRegister(RCX), srcEnd);
+ if (srcBegin.IsConstant()) {
+ if (srcBegin_value != 0) {
+ __ subl(CpuRegister(RCX), Immediate(srcBegin_value));
+ }
+ } else {
+ DCHECK(srcBegin.IsRegister());
+ __ subl(CpuRegister(RCX), srcBegin.AsRegister<CpuRegister>());
+ }
+
+ // Do the move.
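+ // REP MOVSW copies RCX 16-bit units from [RSI] to [RDI], matching the fixed
+ // temporaries reserved in the locations builder.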
+ __ rep_movsw();
+}
+
static void GenPeek(LocationSummary* locations, Primitive::Type size, X86_64Assembler* assembler) {
CpuRegister address = locations->InAt(0).AsRegister<CpuRegister>();
CpuRegister out = locations->Out().AsRegister<CpuRegister>(); // == address, here for clarity.
@@ -1994,7 +2062,6 @@ void IntrinsicLocationsBuilderX86_64::Visit ## Name(HInvoke* invoke ATTRIBUTE_UN
void IntrinsicCodeGeneratorX86_64::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \
}
-UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
#undef UNIMPLEMENTED_INTRINSIC
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 3c57180b6f..8dd31bef86 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -4719,16 +4719,29 @@ class HThrow : public HTemplateInstruction<1> {
DISALLOW_COPY_AND_ASSIGN(HThrow);
};
+/**
+ * Implementation strategies for the code generator of an `HInstanceOf`
+ * or an `HCheckCast`.
+ */
+enum class TypeCheckKind {
+ kExactCheck, // Can do a single class compare.
+ kClassHierarchyCheck, // Can just walk the super class chain.
+ kAbstractClassCheck, // Can just walk the super class chain, starting one up.
+ kInterfaceCheck, // No optimization yet when checking against an interface.
+ kArrayObjectCheck, // Can just check if the array is not primitive.
+ kArrayCheck // No optimization yet when checking against a generic array.
+};
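+// For example: `x instanceof String` can use kExactCheck because String is
+// final, `x instanceof Runnable` needs kInterfaceCheck, and `x instanceof
+// Object[]` only has to verify that the class of `x` is a non-primitive array
+// (kArrayObjectCheck).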
+
class HInstanceOf : public HExpression<2> {
public:
HInstanceOf(HInstruction* object,
HLoadClass* constant,
- bool class_is_final,
+ TypeCheckKind check_kind,
uint32_t dex_pc)
: HExpression(Primitive::kPrimBoolean,
- SideEffectsForArchRuntimeCalls(class_is_final),
+ SideEffectsForArchRuntimeCalls(check_kind),
dex_pc),
- class_is_final_(class_is_final),
+ check_kind_(check_kind),
must_do_null_check_(true) {
SetRawInputAt(0, object);
SetRawInputAt(1, constant);
@@ -4744,20 +4757,25 @@ class HInstanceOf : public HExpression<2> {
return false;
}
- bool IsClassFinal() const { return class_is_final_; }
+ bool IsExactCheck() const { return check_kind_ == TypeCheckKind::kExactCheck; }
+
+ TypeCheckKind GetTypeCheckKind() const { return check_kind_; }
// Used only in code generation.
bool MustDoNullCheck() const { return must_do_null_check_; }
void ClearMustDoNullCheck() { must_do_null_check_ = false; }
- static SideEffects SideEffectsForArchRuntimeCalls(bool class_is_final) {
- return class_is_final ? SideEffects::None() : SideEffects::CanTriggerGC();
+ static SideEffects SideEffectsForArchRuntimeCalls(TypeCheckKind check_kind) {
+ return (check_kind == TypeCheckKind::kExactCheck)
+ ? SideEffects::None()
+ // Mips currently does runtime calls for any other checks.
+ : SideEffects::CanTriggerGC();
}
DECLARE_INSTRUCTION(InstanceOf);
private:
- const bool class_is_final_;
+ const TypeCheckKind check_kind_;
bool must_do_null_check_;
DISALLOW_COPY_AND_ASSIGN(HInstanceOf);
@@ -4813,10 +4831,10 @@ class HCheckCast : public HTemplateInstruction<2> {
public:
HCheckCast(HInstruction* object,
HLoadClass* constant,
- bool class_is_final,
+ TypeCheckKind check_kind,
uint32_t dex_pc)
: HTemplateInstruction(SideEffects::CanTriggerGC(), dex_pc),
- class_is_final_(class_is_final),
+ check_kind_(check_kind),
must_do_null_check_(true) {
SetRawInputAt(0, object);
SetRawInputAt(1, constant);
@@ -4837,14 +4855,14 @@ class HCheckCast : public HTemplateInstruction<2> {
bool MustDoNullCheck() const { return must_do_null_check_; }
void ClearMustDoNullCheck() { must_do_null_check_ = false; }
+ TypeCheckKind GetTypeCheckKind() const { return check_kind_; }
-
- bool IsClassFinal() const { return class_is_final_; }
+ bool IsExactCheck() const { return check_kind_ == TypeCheckKind::kExactCheck; }
DECLARE_INSTRUCTION(CheckCast);
private:
- const bool class_is_final_;
+ const TypeCheckKind check_kind_;
bool must_do_null_check_;
DISALLOW_COPY_AND_ASSIGN(HCheckCast);
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index 9dcbea08c5..6f71ea3d6b 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -577,12 +577,28 @@ void SsaBuilder::VisitInstruction(HInstruction* instruction) {
const HTryBoundary& try_entry =
instruction->GetBlock()->GetTryCatchInformation()->GetTryEntry();
for (HExceptionHandlerIterator it(try_entry); !it.Done(); it.Advance()) {
- ArenaVector<HInstruction*>* handler_locals = GetLocalsFor(it.Current());
+ HBasicBlock* catch_block = it.Current();
+ ArenaVector<HInstruction*>* handler_locals = GetLocalsFor(catch_block);
DCHECK_EQ(handler_locals->size(), current_locals_->size());
- for (size_t i = 0, e = current_locals_->size(); i < e; ++i) {
- HInstruction* local_value = (*current_locals_)[i];
- if (local_value != nullptr) {
- (*handler_locals)[i]->AsPhi()->AddInput(local_value);
+ for (size_t vreg = 0, e = current_locals_->size(); vreg < e; ++vreg) {
+ HInstruction* handler_value = (*handler_locals)[vreg];
+ if (handler_value == nullptr) {
+ // Vreg was undefined at a previously encountered throwing instruction
+ // and the catch phi was deleted. Do not record the local value.
+ continue;
+ }
+ DCHECK(handler_value->IsPhi());
+
+ HInstruction* local_value = (*current_locals_)[vreg];
+ if (local_value == nullptr) {
+ // This is the first instruction throwing into `catch_block` where
+ // `vreg` is undefined. Delete the catch phi.
+ catch_block->RemovePhi(handler_value->AsPhi());
+ (*handler_locals)[vreg] = nullptr;
+ } else {
+ // Vreg has been defined at all instructions throwing into `catch_block`
+ // encountered so far. Record the local value in the catch phi.
+ handler_value->AsPhi()->AddInput(local_value);
}
}
}