author      2015-09-17 17:12:37 +0000
committer   2015-09-17 17:12:37 +0000
commit      6766eae2d91e894b4ceab9f29cc983900e7bc0c7 (patch)
tree        4a3aac762e01c7933bcbffebb5277bde208e975b /compiler/optimizing
parent      930761fb7a4db70fbd5e75faa1fca07e5b494ae9 (diff)
parent      7537437c6a2f89249a48e30effcc27d4e7c5a04f (diff)
Merge "Revert "Optimize code generation of check-cast and instance-of.""
Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/builder.cc                |  54
-rw-r--r--  compiler/optimizing/code_generator_arm.cc     | 314
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc   | 348
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc  |   4
-rw-r--r--  compiler/optimizing/code_generator_x86.cc     | 364
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc  | 367
-rw-r--r--  compiler/optimizing/nodes.h                   |  42
7 files changed, 263 insertions, 1230 deletions
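The per-backend hunks below all restore the same shape: one fast path that loads the object's class and compares it against `cls`, materializing the result inline when the class is final and deferring to a slow path otherwise (check-cast is the same comparison, except inequality branches to a throwing slow path). A C-level model of that restored instanceof sequence follows; SlowPathIsInstanceOf is a hypothetical stand-in for the pInstanceofNonTrivial entrypoint, and the model describes the emitted assembly rather than any code in the patch.

bool SlowPathIsInstanceOf(mirror::Class* klass, mirror::Class* cls);  // hypothetical

bool InstanceOfModel(mirror::Object* obj, mirror::Class* cls, bool is_class_final) {
  if (obj == nullptr) {
    return false;                           // the MustDoNullCheck() guard
  }
  mirror::Class* klass = obj->GetClass();   // load at class_offset (and unpoison)
  if (klass == cls) {
    return true;                            // fast path: exact class match
  }
  if (is_class_final) {
    return false;                           // a final class has no subtypes
  }
  return SlowPathIsInstanceOf(klass, cls);  // runtime handles the general case
}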
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc index 274a2a699f..3012346a95 100644 --- a/compiler/optimizing/builder.cc +++ b/compiler/optimizing/builder.cc @@ -1614,48 +1614,25 @@ void HGraphBuilder::BuildFillWideArrayData(HInstruction* object, } } -static TypeCheckKind ComputeTypeCheckKind(Handle<mirror::Class> cls) - SHARED_REQUIRES(Locks::mutator_lock_) { - if (cls->IsInterface()) { - return TypeCheckKind::kInterfaceCheck; - } else if (cls->IsArrayClass()) { - if (cls->GetComponentType()->IsObjectClass()) { - return TypeCheckKind::kArrayObjectCheck; - } else if (cls->CannotBeAssignedFromOtherTypes()) { - return TypeCheckKind::kExactCheck; - } else { - return TypeCheckKind::kArrayCheck; - } - } else if (cls->IsFinal()) { - return TypeCheckKind::kExactCheck; - } else if (cls->IsAbstract()) { - return TypeCheckKind::kAbstractClassCheck; - } else { - return TypeCheckKind::kClassHierarchyCheck; - } -} - bool HGraphBuilder::BuildTypeCheck(const Instruction& instruction, uint8_t destination, uint8_t reference, uint16_t type_index, uint32_t dex_pc) { - ScopedObjectAccess soa(Thread::Current()); - StackHandleScope<2> hs(soa.Self()); - Handle<mirror::DexCache> dex_cache(hs.NewHandle( - dex_compilation_unit_->GetClassLinker()->FindDexCache( - soa.Self(), *dex_compilation_unit_->GetDexFile()))); - Handle<mirror::Class> resolved_class(hs.NewHandle(dex_cache->GetResolvedType(type_index))); - - if ((resolved_class.Get() == nullptr) || - // TODO: Remove this check once the compiler actually knows which - // ArtMethod it is compiling. - (GetCompilingClass() == nullptr) || - !GetCompilingClass()->CanAccess(resolved_class.Get())) { + bool type_known_final; + bool type_known_abstract; + // `CanAccessTypeWithoutChecks` will tell whether the method being + // built is trying to access its own class, so that the generated + // code can optimize for this case. However, the optimization does not + // work for inlining, so we use `IsOutermostCompilingClass` instead. + bool dont_use_is_referrers_class; + bool can_access = compiler_driver_->CanAccessTypeWithoutChecks( + dex_compilation_unit_->GetDexMethodIndex(), *dex_file_, type_index, + &type_known_final, &type_known_abstract, &dont_use_is_referrers_class); + if (!can_access) { MaybeRecordStat(MethodCompilationStat::kNotCompiledCantAccesType); return false; } - HInstruction* object = LoadLocal(reference, Primitive::kPrimNot, dex_pc); HLoadClass* cls = new (arena_) HLoadClass( graph_->GetCurrentMethod(), @@ -1664,18 +1641,17 @@ bool HGraphBuilder::BuildTypeCheck(const Instruction& instruction, IsOutermostCompilingClass(type_index), dex_pc); current_block_->AddInstruction(cls); - // The class needs a temporary before being used by the type check. 
Temporaries temps(graph_); temps.Add(cls); - - TypeCheckKind check_kind = ComputeTypeCheckKind(resolved_class); if (instruction.Opcode() == Instruction::INSTANCE_OF) { - current_block_->AddInstruction(new (arena_) HInstanceOf(object, cls, check_kind, dex_pc)); + current_block_->AddInstruction( + new (arena_) HInstanceOf(object, cls, type_known_final, dex_pc)); UpdateLocal(destination, current_block_->GetLastInstruction(), dex_pc); } else { DCHECK_EQ(instruction.Opcode(), Instruction::CHECK_CAST); - current_block_->AddInstruction(new (arena_) HCheckCast(object, cls, check_kind, dex_pc)); + current_block_->AddInstruction( + new (arena_) HCheckCast(object, cls, type_known_final, dex_pc)); } return true; } diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc index a09863c7b2..6f89293018 100644 --- a/compiler/optimizing/code_generator_arm.cc +++ b/compiler/optimizing/code_generator_arm.cc @@ -271,8 +271,7 @@ class LoadStringSlowPathARM : public SlowPathCodeARM { class TypeCheckSlowPathARM : public SlowPathCodeARM { public: - TypeCheckSlowPathARM(HInstruction* instruction, bool is_fatal) - : instruction_(instruction), is_fatal_(is_fatal) {} + explicit TypeCheckSlowPathARM(HInstruction* instruction) : instruction_(instruction) {} void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { LocationSummary* locations = instruction_->GetLocations(); @@ -283,19 +282,7 @@ class TypeCheckSlowPathARM : public SlowPathCodeARM { CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen); __ Bind(GetEntryLabel()); - - if (instruction_->IsCheckCast()) { - // The codegen for the instruction overwrites `temp`, so put it back in place. - Register obj = locations->InAt(0).AsRegister<Register>(); - Register temp = locations->GetTemp(0).AsRegister<Register>(); - uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); - __ LoadFromOffset(kLoadWord, temp, obj, class_offset); - __ MaybeUnpoisonHeapReference(temp); - } - - if (!is_fatal_) { - SaveLiveRegisters(codegen, locations); - } + SaveLiveRegisters(codegen, locations); // We're moving two locations to locations that could overlap, so we need a parallel // move resolver. @@ -322,19 +309,14 @@ class TypeCheckSlowPathARM : public SlowPathCodeARM { this); } - if (!is_fatal_) { - RestoreLiveRegisters(codegen, locations); - __ b(GetExitLabel()); - } + RestoreLiveRegisters(codegen, locations); + __ b(GetExitLabel()); } const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathARM"; } - bool IsFatal() const OVERRIDE { return is_fatal_; } - private: HInstruction* const instruction_; - const bool is_fatal_; DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM); }; @@ -4375,34 +4357,15 @@ void InstructionCodeGeneratorARM::VisitThrow(HThrow* instruction) { } void LocationsBuilderARM::VisitInstanceOf(HInstanceOf* instruction) { - LocationSummary::CallKind call_kind = LocationSummary::kNoCall; - switch (instruction->GetTypeCheckKind()) { - case TypeCheckKind::kExactCheck: - case TypeCheckKind::kAbstractClassCheck: - case TypeCheckKind::kClassHierarchyCheck: - case TypeCheckKind::kArrayObjectCheck: - call_kind = LocationSummary::kNoCall; - break; - case TypeCheckKind::kInterfaceCheck: - call_kind = LocationSummary::kCall; - break; - case TypeCheckKind::kArrayCheck: - call_kind = LocationSummary::kCallOnSlowPath; - break; - } + LocationSummary::CallKind call_kind = instruction->IsClassFinal() + ? 
LocationSummary::kNoCall + : LocationSummary::kCallOnSlowPath; LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); - if (call_kind != LocationSummary::kCall) { - locations->SetInAt(0, Location::RequiresRegister()); - locations->SetInAt(1, Location::RequiresRegister()); - // The out register is used as a temporary, so it overlaps with the inputs. - // Note that TypeCheckSlowPathARM uses this register too. - locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap); - } else { - InvokeRuntimeCallingConvention calling_convention; - locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); - locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); - locations->SetOut(Location::RegisterLocation(R0)); - } + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::RequiresRegister()); + // The out register is used as a temporary, so it overlaps with the inputs. + // Note that TypeCheckSlowPathARM uses this register too. + locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap); } void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) { @@ -4411,9 +4374,6 @@ void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) { Register cls = locations->InAt(1).AsRegister<Register>(); Register out = locations->Out().AsRegister<Register>(); uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); - uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value(); - uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value(); - uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value(); Label done, zero; SlowPathCodeARM* slow_path = nullptr; @@ -4422,242 +4382,68 @@ void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) { if (instruction->MustDoNullCheck()) { __ CompareAndBranchIfZero(obj, &zero); } - - // In case of an interface check, we put the object class into the object register. - // This is safe, as the register is caller-save, and the object must be in another - // register if it survives the runtime call. - Register target = (instruction->GetTypeCheckKind() == TypeCheckKind::kInterfaceCheck) - ? obj - : out; - __ LoadFromOffset(kLoadWord, target, obj, class_offset); - __ MaybeUnpoisonHeapReference(target); - - switch (instruction->GetTypeCheckKind()) { - case TypeCheckKind::kExactCheck: { - __ cmp(out, ShifterOperand(cls)); - // Classes must be equal for the instanceof to succeed. - __ b(&zero, NE); - __ LoadImmediate(out, 1); - __ b(&done); - break; - } - case TypeCheckKind::kAbstractClassCheck: { - // If the class is abstract, we eagerly fetch the super class of the - // object to avoid doing a comparison we know will fail. - Label loop; - __ Bind(&loop); - __ LoadFromOffset(kLoadWord, out, out, super_offset); - __ MaybeUnpoisonHeapReference(out); - // If `out` is null, we use it for the result, and jump to `done`. - __ CompareAndBranchIfZero(out, &done); - __ cmp(out, ShifterOperand(cls)); - __ b(&loop, NE); - __ LoadImmediate(out, 1); - if (zero.IsLinked()) { - __ b(&done); - } - break; - } - case TypeCheckKind::kClassHierarchyCheck: { - // Walk over the class hierarchy to find a match. 
- Label loop, success; - __ Bind(&loop); - __ cmp(out, ShifterOperand(cls)); - __ b(&success, EQ); - __ LoadFromOffset(kLoadWord, out, out, super_offset); - __ MaybeUnpoisonHeapReference(out); - __ CompareAndBranchIfNonZero(out, &loop); - // If `out` is null, we use it for the result, and jump to `done`. - __ b(&done); - __ Bind(&success); - __ LoadImmediate(out, 1); - if (zero.IsLinked()) { - __ b(&done); - } - break; - } - case TypeCheckKind::kArrayObjectCheck: { - // Just need to check that the object's class is a non primitive array. - __ LoadFromOffset(kLoadWord, out, out, component_offset); - __ MaybeUnpoisonHeapReference(out); - // If `out` is null, we use it for the result, and jump to `done`. - __ CompareAndBranchIfZero(out, &done); - __ LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset); - static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot"); - __ CompareAndBranchIfNonZero(out, &zero); - __ LoadImmediate(out, 1); - __ b(&done); - break; - } - case TypeCheckKind::kArrayCheck: { - __ cmp(out, ShifterOperand(cls)); - DCHECK(locations->OnlyCallsOnSlowPath()); - slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM( - instruction, /* is_fatal */ false); - codegen_->AddSlowPath(slow_path); - __ b(slow_path->GetEntryLabel(), NE); - __ LoadImmediate(out, 1); - if (zero.IsLinked()) { - __ b(&done); - } - break; - } - - case TypeCheckKind::kInterfaceCheck: - default: { - codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial), - instruction, - instruction->GetDexPc(), - nullptr); - if (zero.IsLinked()) { - __ b(&done); - } - break; - } + // Compare the class of `obj` with `cls`. + __ LoadFromOffset(kLoadWord, out, obj, class_offset); + __ MaybeUnpoisonHeapReference(out); + __ cmp(out, ShifterOperand(cls)); + if (instruction->IsClassFinal()) { + // Classes must be equal for the instanceof to succeed. + __ b(&zero, NE); + __ LoadImmediate(out, 1); + __ b(&done); + } else { + // If the classes are not equal, we go into a slow path. + DCHECK(locations->OnlyCallsOnSlowPath()); + slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(instruction); + codegen_->AddSlowPath(slow_path); + __ b(slow_path->GetEntryLabel(), NE); + __ LoadImmediate(out, 1); + __ b(&done); } - if (zero.IsLinked()) { + if (instruction->MustDoNullCheck() || instruction->IsClassFinal()) { __ Bind(&zero); __ LoadImmediate(out, 0); } - if (done.IsLinked()) { - __ Bind(&done); - } - if (slow_path != nullptr) { __ Bind(slow_path->GetExitLabel()); } + __ Bind(&done); } void LocationsBuilderARM::VisitCheckCast(HCheckCast* instruction) { - LocationSummary::CallKind call_kind = LocationSummary::kNoCall; - bool throws_into_catch = instruction->CanThrowIntoCatchBlock(); - - switch (instruction->GetTypeCheckKind()) { - case TypeCheckKind::kExactCheck: - case TypeCheckKind::kAbstractClassCheck: - case TypeCheckKind::kClassHierarchyCheck: - case TypeCheckKind::kArrayObjectCheck: - call_kind = throws_into_catch - ? LocationSummary::kCallOnSlowPath - : LocationSummary::kNoCall; - break; - case TypeCheckKind::kInterfaceCheck: - call_kind = LocationSummary::kCall; - break; - case TypeCheckKind::kArrayCheck: - call_kind = LocationSummary::kCallOnSlowPath; - break; - } - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary( - instruction, call_kind); - if (call_kind != LocationSummary::kCall) { - locations->SetInAt(0, Location::RequiresRegister()); - locations->SetInAt(1, Location::RequiresRegister()); - // Note that TypeCheckSlowPathARM uses this register too. 
- locations->AddTemp(Location::RequiresRegister()); - } else { - InvokeRuntimeCallingConvention calling_convention; - locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); - locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); - } + instruction, LocationSummary::kCallOnSlowPath); + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::RequiresRegister()); + // Note that TypeCheckSlowPathARM uses this register too. + locations->AddTemp(Location::RequiresRegister()); } void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) { LocationSummary* locations = instruction->GetLocations(); Register obj = locations->InAt(0).AsRegister<Register>(); Register cls = locations->InAt(1).AsRegister<Register>(); - Register temp = locations->WillCall() - ? Register(kNoRegister) - : locations->GetTemp(0).AsRegister<Register>(); - + Register temp = locations->GetTemp(0).AsRegister<Register>(); uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); - uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value(); - uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value(); - uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value(); - SlowPathCodeARM* slow_path = nullptr; - if (!locations->WillCall()) { - slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM( - instruction, !locations->CanCall()); - codegen_->AddSlowPath(slow_path); - } + SlowPathCodeARM* slow_path = + new (GetGraph()->GetArena()) TypeCheckSlowPathARM(instruction); + codegen_->AddSlowPath(slow_path); - Label done; - // Avoid null check if we know obj is not null. + // avoid null check if we know obj is not null. if (instruction->MustDoNullCheck()) { - __ CompareAndBranchIfZero(obj, &done); - } - - if (locations->WillCall()) { - __ LoadFromOffset(kLoadWord, obj, obj, class_offset); - __ MaybeUnpoisonHeapReference(obj); - } else { - __ LoadFromOffset(kLoadWord, temp, obj, class_offset); - __ MaybeUnpoisonHeapReference(temp); - } - - switch (instruction->GetTypeCheckKind()) { - case TypeCheckKind::kExactCheck: - case TypeCheckKind::kArrayCheck: { - __ cmp(temp, ShifterOperand(cls)); - // Jump to slow path for throwing the exception or doing a - // more involved array check. - __ b(slow_path->GetEntryLabel(), NE); - break; - } - case TypeCheckKind::kAbstractClassCheck: { - // If the class is abstract, we eagerly fetch the super class of the - // object to avoid doing a comparison we know will fail. - Label loop; - __ Bind(&loop); - __ LoadFromOffset(kLoadWord, temp, temp, super_offset); - __ MaybeUnpoisonHeapReference(temp); - // Jump to the slow path to throw the exception. - __ CompareAndBranchIfZero(temp, slow_path->GetEntryLabel()); - __ cmp(temp, ShifterOperand(cls)); - __ b(&loop, NE); - break; - } - case TypeCheckKind::kClassHierarchyCheck: { - // Walk over the class hierarchy to find a match. - Label loop, success; - __ Bind(&loop); - __ cmp(temp, ShifterOperand(cls)); - __ b(&success, EQ); - __ LoadFromOffset(kLoadWord, temp, temp, super_offset); - __ MaybeUnpoisonHeapReference(temp); - __ CompareAndBranchIfNonZero(temp, &loop); - // Jump to the slow path to throw the exception. - __ b(slow_path->GetEntryLabel()); - __ Bind(&success); - break; - } - case TypeCheckKind::kArrayObjectCheck: { - // Just need to check that the object's class is a non primitive array. 
- __ LoadFromOffset(kLoadWord, temp, temp, component_offset); - __ MaybeUnpoisonHeapReference(temp); - __ CompareAndBranchIfZero(temp, slow_path->GetEntryLabel()); - __ LoadFromOffset(kLoadUnsignedHalfword, temp, temp, primitive_offset); - static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot"); - __ CompareAndBranchIfNonZero(temp, slow_path->GetEntryLabel()); - break; - } - case TypeCheckKind::kInterfaceCheck: - default: - codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), - instruction, - instruction->GetDexPc(), - nullptr); - break; - } - __ Bind(&done); - - if (slow_path != nullptr) { - __ Bind(slow_path->GetExitLabel()); + __ CompareAndBranchIfZero(obj, slow_path->GetExitLabel()); } + // Compare the class of `obj` with `cls`. + __ LoadFromOffset(kLoadWord, temp, obj, class_offset); + __ MaybeUnpoisonHeapReference(temp); + __ cmp(temp, ShifterOperand(cls)); + // The checkcast succeeds if the classes are equal (fast path). + // Otherwise, we need to go into the slow path to check the types. + __ b(slow_path->GetEntryLabel(), NE); + __ Bind(slow_path->GetExitLabel()); } void LocationsBuilderARM::VisitMonitorOperation(HMonitorOperation* instruction) { diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc index 580e93e9c4..531b66927b 100644 --- a/compiler/optimizing/code_generator_arm64.cc +++ b/compiler/optimizing/code_generator_arm64.cc @@ -409,8 +409,7 @@ class SuspendCheckSlowPathARM64 : public SlowPathCodeARM64 { class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 { public: - TypeCheckSlowPathARM64(HInstruction* instruction, bool is_fatal) - : instruction_(instruction), is_fatal_(is_fatal) {} + explicit TypeCheckSlowPathARM64(HInstruction* instruction) : instruction_(instruction) {} void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { LocationSummary* locations = instruction_->GetLocations(); @@ -423,19 +422,7 @@ class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 { uint32_t dex_pc = instruction_->GetDexPc(); __ Bind(GetEntryLabel()); - - if (instruction_->IsCheckCast()) { - // The codegen for the instruction overwrites `temp`, so put it back in place. - Register obj = InputRegisterAt(instruction_, 0); - Register temp = WRegisterFrom(locations->GetTemp(0)); - uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); - __ Ldr(temp, HeapOperand(obj, class_offset)); - arm64_codegen->GetAssembler()->MaybeUnpoisonHeapReference(temp); - } - - if (!is_fatal_) { - SaveLiveRegisters(codegen, locations); - } + SaveLiveRegisters(codegen, locations); // We're moving two locations to locations that could overlap, so we need a parallel // move resolver. 
@@ -458,18 +445,14 @@ class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 { CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>(); } - if (!is_fatal_) { - RestoreLiveRegisters(codegen, locations); - __ B(GetExitLabel()); - } + RestoreLiveRegisters(codegen, locations); + __ B(GetExitLabel()); } const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathARM64"; } - bool IsFatal() const { return is_fatal_; } private: HInstruction* const instruction_; - const bool is_fatal_; DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM64); }; @@ -1646,6 +1629,38 @@ void InstructionCodeGeneratorARM64::VisitBoundsCheck(HBoundsCheck* instruction) __ B(slow_path->GetEntryLabel(), hs); } +void LocationsBuilderARM64::VisitCheckCast(HCheckCast* instruction) { + LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary( + instruction, LocationSummary::kCallOnSlowPath); + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::RequiresRegister()); + // Note that TypeCheckSlowPathARM64 uses this register too. + locations->AddTemp(Location::RequiresRegister()); +} + +void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) { + Register obj = InputRegisterAt(instruction, 0);; + Register cls = InputRegisterAt(instruction, 1);; + Register obj_cls = WRegisterFrom(instruction->GetLocations()->GetTemp(0)); + + SlowPathCodeARM64* slow_path = + new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(instruction); + codegen_->AddSlowPath(slow_path); + + // Avoid null check if we know obj is not null. + if (instruction->MustDoNullCheck()) { + __ Cbz(obj, slow_path->GetExitLabel()); + } + // Compare the class of `obj` with `cls`. + __ Ldr(obj_cls, HeapOperand(obj, mirror::Object::ClassOffset())); + GetAssembler()->MaybeUnpoisonHeapReference(obj_cls.W()); + __ Cmp(obj_cls, cls); + // The checkcast succeeds if the classes are equal (fast path). + // Otherwise, we need to go into the slow path to check the types. + __ B(ne, slow_path->GetEntryLabel()); + __ Bind(slow_path->GetExitLabel()); +} + void LocationsBuilderARM64::VisitClinitCheck(HClinitCheck* check) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath); @@ -2239,291 +2254,50 @@ void InstructionCodeGeneratorARM64::VisitInstanceFieldSet(HInstanceFieldSet* ins } void LocationsBuilderARM64::VisitInstanceOf(HInstanceOf* instruction) { - LocationSummary::CallKind call_kind = LocationSummary::kNoCall; - switch (instruction->GetTypeCheckKind()) { - case TypeCheckKind::kExactCheck: - case TypeCheckKind::kAbstractClassCheck: - case TypeCheckKind::kClassHierarchyCheck: - case TypeCheckKind::kArrayObjectCheck: - call_kind = LocationSummary::kNoCall; - break; - case TypeCheckKind::kInterfaceCheck: - call_kind = LocationSummary::kCall; - break; - case TypeCheckKind::kArrayCheck: - call_kind = LocationSummary::kCallOnSlowPath; - break; - } + LocationSummary::CallKind call_kind = + instruction->IsClassFinal() ? LocationSummary::kNoCall : LocationSummary::kCallOnSlowPath; LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); - if (call_kind != LocationSummary::kCall) { - locations->SetInAt(0, Location::RequiresRegister()); - locations->SetInAt(1, Location::RequiresRegister()); - // The out register is used as a temporary, so it overlaps with the inputs. - // Note that TypeCheckSlowPathARM64 uses this register too. 
- locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap); - } else { - InvokeRuntimeCallingConvention calling_convention; - locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(0))); - locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(1))); - locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimInt)); - } + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::RequiresRegister()); + // The output does overlap inputs. + // Note that TypeCheckSlowPathARM64 uses this register too. + locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap); } void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) { LocationSummary* locations = instruction->GetLocations(); - Register obj = InputRegisterAt(instruction, 0); - Register cls = InputRegisterAt(instruction, 1); + Register obj = InputRegisterAt(instruction, 0);; + Register cls = InputRegisterAt(instruction, 1);; Register out = OutputRegister(instruction); - uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); - uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value(); - uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value(); - uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value(); - vixl::Label done, zero; - SlowPathCodeARM64* slow_path = nullptr; + vixl::Label done; // Return 0 if `obj` is null. // Avoid null check if we know `obj` is not null. if (instruction->MustDoNullCheck()) { - __ Cbz(obj, &zero); - } - - // In case of an interface check, we put the object class into the object register. - // This is safe, as the register is caller-save, and the object must be in another - // register if it survives the runtime call. - Register target = (instruction->GetTypeCheckKind() == TypeCheckKind::kInterfaceCheck) - ? obj - : out; - __ Ldr(target, HeapOperand(obj.W(), class_offset)); - GetAssembler()->MaybeUnpoisonHeapReference(target); - - switch (instruction->GetTypeCheckKind()) { - case TypeCheckKind::kExactCheck: { - __ Cmp(out, cls); - __ Cset(out, eq); - if (zero.IsLinked()) { - __ B(&done); - } - break; - } - case TypeCheckKind::kAbstractClassCheck: { - // If the class is abstract, we eagerly fetch the super class of the - // object to avoid doing a comparison we know will fail. - vixl::Label loop, success; - __ Bind(&loop); - __ Ldr(out, HeapOperand(out, super_offset)); - GetAssembler()->MaybeUnpoisonHeapReference(out); - // If `out` is null, we use it for the result, and jump to `done`. - __ Cbz(out, &done); - __ Cmp(out, cls); - __ B(ne, &loop); - __ Mov(out, 1); - if (zero.IsLinked()) { - __ B(&done); - } - break; - } - case TypeCheckKind::kClassHierarchyCheck: { - // Walk over the class hierarchy to find a match. - vixl::Label loop, success; - __ Bind(&loop); - __ Cmp(out, cls); - __ B(eq, &success); - __ Ldr(out, HeapOperand(out, super_offset)); - GetAssembler()->MaybeUnpoisonHeapReference(out); - __ Cbnz(out, &loop); - // If `out` is null, we use it for the result, and jump to `done`. - __ B(&done); - __ Bind(&success); - __ Mov(out, 1); - if (zero.IsLinked()) { - __ B(&done); - } - break; - } - case TypeCheckKind::kArrayObjectCheck: { - // Just need to check that the object's class is a non primitive array. - __ Ldr(out, HeapOperand(out, component_offset)); - GetAssembler()->MaybeUnpoisonHeapReference(out); - // If `out` is null, we use it for the result, and jump to `done`. 
- __ Cbz(out, &done); - __ Ldrh(out, HeapOperand(out, primitive_offset)); - static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot"); - __ Cbnz(out, &zero); - __ Mov(out, 1); - __ B(&done); - break; - } - case TypeCheckKind::kArrayCheck: { - __ Cmp(out, cls); - DCHECK(locations->OnlyCallsOnSlowPath()); - slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM64( - instruction, /* is_fatal */ false); - codegen_->AddSlowPath(slow_path); - __ B(ne, slow_path->GetEntryLabel()); - __ Mov(out, 1); - if (zero.IsLinked()) { - __ B(&done); - } - break; - } - - case TypeCheckKind::kInterfaceCheck: - default: { - codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial), - instruction, - instruction->GetDexPc(), - nullptr); - if (zero.IsLinked()) { - __ B(&done); - } - break; - } - } - - if (zero.IsLinked()) { - __ Bind(&zero); __ Mov(out, 0); - } - - if (done.IsLinked()) { - __ Bind(&done); - } - - if (slow_path != nullptr) { - __ Bind(slow_path->GetExitLabel()); - } -} - -void LocationsBuilderARM64::VisitCheckCast(HCheckCast* instruction) { - LocationSummary::CallKind call_kind = LocationSummary::kNoCall; - bool throws_into_catch = instruction->CanThrowIntoCatchBlock(); - - switch (instruction->GetTypeCheckKind()) { - case TypeCheckKind::kExactCheck: - case TypeCheckKind::kAbstractClassCheck: - case TypeCheckKind::kClassHierarchyCheck: - case TypeCheckKind::kArrayObjectCheck: - call_kind = throws_into_catch - ? LocationSummary::kCallOnSlowPath - : LocationSummary::kNoCall; - break; - case TypeCheckKind::kInterfaceCheck: - call_kind = LocationSummary::kCall; - break; - case TypeCheckKind::kArrayCheck: - call_kind = LocationSummary::kCallOnSlowPath; - break; - } - - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary( - instruction, call_kind); - if (call_kind != LocationSummary::kCall) { - locations->SetInAt(0, Location::RequiresRegister()); - locations->SetInAt(1, Location::RequiresRegister()); - // Note that TypeCheckSlowPathARM64 uses this register too. - locations->AddTemp(Location::RequiresRegister()); - } else { - InvokeRuntimeCallingConvention calling_convention; - locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(0))); - locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(1))); - } -} - -void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) { - LocationSummary* locations = instruction->GetLocations(); - Register obj = InputRegisterAt(instruction, 0); - Register cls = InputRegisterAt(instruction, 1); - Register temp; - if (!locations->WillCall()) { - temp = WRegisterFrom(instruction->GetLocations()->GetTemp(0)); - } - - uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); - uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value(); - uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value(); - uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value(); - SlowPathCodeARM64* slow_path = nullptr; - - if (!locations->WillCall()) { - slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM64( - instruction, !locations->CanCall()); - codegen_->AddSlowPath(slow_path); - } - - vixl::Label done; - // Avoid null check if we know obj is not null. - if (instruction->MustDoNullCheck()) { __ Cbz(obj, &done); } - if (locations->WillCall()) { - __ Ldr(obj, HeapOperand(obj, class_offset)); - GetAssembler()->MaybeUnpoisonHeapReference(obj); + // Compare the class of `obj` with `cls`. 
+ __ Ldr(out, HeapOperand(obj, mirror::Object::ClassOffset())); + GetAssembler()->MaybeUnpoisonHeapReference(out.W()); + __ Cmp(out, cls); + if (instruction->IsClassFinal()) { + // Classes must be equal for the instanceof to succeed. + __ Cset(out, eq); } else { - __ Ldr(temp, HeapOperand(obj, class_offset)); - GetAssembler()->MaybeUnpoisonHeapReference(temp); + // If the classes are not equal, we go into a slow path. + DCHECK(locations->OnlyCallsOnSlowPath()); + SlowPathCodeARM64* slow_path = + new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(instruction); + codegen_->AddSlowPath(slow_path); + __ B(ne, slow_path->GetEntryLabel()); + __ Mov(out, 1); + __ Bind(slow_path->GetExitLabel()); } - switch (instruction->GetTypeCheckKind()) { - case TypeCheckKind::kExactCheck: - case TypeCheckKind::kArrayCheck: { - __ Cmp(temp, cls); - // Jump to slow path for throwing the exception or doing a - // more involved array check. - __ B(ne, slow_path->GetEntryLabel()); - break; - } - case TypeCheckKind::kAbstractClassCheck: { - // If the class is abstract, we eagerly fetch the super class of the - // object to avoid doing a comparison we know will fail. - vixl::Label loop; - __ Bind(&loop); - __ Ldr(temp, HeapOperand(temp, super_offset)); - GetAssembler()->MaybeUnpoisonHeapReference(temp); - // Jump to the slow path to throw the exception. - __ Cbz(temp, slow_path->GetEntryLabel()); - __ Cmp(temp, cls); - __ B(ne, &loop); - break; - } - case TypeCheckKind::kClassHierarchyCheck: { - // Walk over the class hierarchy to find a match. - vixl::Label loop, success; - __ Bind(&loop); - __ Cmp(temp, cls); - __ B(eq, &success); - __ Ldr(temp, HeapOperand(temp, super_offset)); - GetAssembler()->MaybeUnpoisonHeapReference(temp); - __ Cbnz(temp, &loop); - // Jump to the slow path to throw the exception. - __ B(slow_path->GetEntryLabel()); - __ Bind(&success); - break; - } - case TypeCheckKind::kArrayObjectCheck: { - // Just need to check that the object's class is a non primitive array. - __ Ldr(temp, HeapOperand(temp, component_offset)); - GetAssembler()->MaybeUnpoisonHeapReference(temp); - __ Cbz(temp, slow_path->GetEntryLabel()); - __ Ldrh(temp, HeapOperand(temp, primitive_offset)); - static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot"); - __ Cbnz(temp, slow_path->GetEntryLabel()); - break; - } - case TypeCheckKind::kInterfaceCheck: - default: - codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), - instruction, - instruction->GetDexPc(), - nullptr); - break; - } __ Bind(&done); - - if (slow_path != nullptr) { - __ Bind(slow_path->GetExitLabel()); - } } void LocationsBuilderARM64::VisitIntConstant(HIntConstant* constant) { diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc index 4722e42694..bf0d2e2a11 100644 --- a/compiler/optimizing/code_generator_mips64.cc +++ b/compiler/optimizing/code_generator_mips64.cc @@ -2281,7 +2281,7 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceFieldSet(HInstanceFieldSet* in void LocationsBuilderMIPS64::VisitInstanceOf(HInstanceOf* instruction) { LocationSummary::CallKind call_kind = - instruction->IsExactCheck() ? LocationSummary::kNoCall : LocationSummary::kCallOnSlowPath; + instruction->IsClassFinal() ? 
LocationSummary::kNoCall : LocationSummary::kCallOnSlowPath; LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); @@ -2305,7 +2305,7 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) { // Compare the class of `obj` with `cls`. __ LoadFromOffset(kLoadUnsignedWord, out, obj, mirror::Object::ClassOffset().Int32Value()); - if (instruction->IsExactCheck()) { + if (instruction->IsClassFinal()) { // Classes must be equal for the instanceof to succeed. __ Xor(out, out, cls); __ Sltiu(out, out, 1); diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc index 2481c7df34..9c5ecc3544 100644 --- a/compiler/optimizing/code_generator_x86.cc +++ b/compiler/optimizing/code_generator_x86.cc @@ -287,8 +287,7 @@ class LoadClassSlowPathX86 : public SlowPathCodeX86 { class TypeCheckSlowPathX86 : public SlowPathCodeX86 { public: - TypeCheckSlowPathX86(HInstruction* instruction, bool is_fatal) - : instruction_(instruction), is_fatal_(is_fatal) {} + explicit TypeCheckSlowPathX86(HInstruction* instruction) : instruction_(instruction) {} void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { LocationSummary* locations = instruction_->GetLocations(); @@ -299,19 +298,7 @@ class TypeCheckSlowPathX86 : public SlowPathCodeX86 { CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen); __ Bind(GetEntryLabel()); - - if (instruction_->IsCheckCast()) { - // The codegen for the instruction overwrites `temp`, so put it back in place. - Register obj = locations->InAt(0).AsRegister<Register>(); - Register temp = locations->GetTemp(0).AsRegister<Register>(); - uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); - __ movl(temp, Address(obj, class_offset)); - __ MaybeUnpoisonHeapReference(temp); - } - - if (!is_fatal_) { - SaveLiveRegisters(codegen, locations); - } + SaveLiveRegisters(codegen, locations); // We're moving two locations to locations that could overlap, so we need a parallel // move resolver. 
@@ -337,22 +324,18 @@ class TypeCheckSlowPathX86 : public SlowPathCodeX86 { this); } - if (!is_fatal_) { - if (instruction_->IsInstanceOf()) { - x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX)); - } - RestoreLiveRegisters(codegen, locations); - - __ jmp(GetExitLabel()); + if (instruction_->IsInstanceOf()) { + x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX)); } + RestoreLiveRegisters(codegen, locations); + + __ jmp(GetExitLabel()); } const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathX86"; } - bool IsFatal() const OVERRIDE { return is_fatal_; } private: HInstruction* const instruction_; - const bool is_fatal_; DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathX86); }; @@ -4973,33 +4956,14 @@ void InstructionCodeGeneratorX86::VisitThrow(HThrow* instruction) { } void LocationsBuilderX86::VisitInstanceOf(HInstanceOf* instruction) { - LocationSummary::CallKind call_kind = LocationSummary::kNoCall; - switch (instruction->GetTypeCheckKind()) { - case TypeCheckKind::kExactCheck: - case TypeCheckKind::kAbstractClassCheck: - case TypeCheckKind::kClassHierarchyCheck: - case TypeCheckKind::kArrayObjectCheck: - call_kind = LocationSummary::kNoCall; - break; - case TypeCheckKind::kInterfaceCheck: - call_kind = LocationSummary::kCall; - break; - case TypeCheckKind::kArrayCheck: - call_kind = LocationSummary::kCallOnSlowPath; - break; - } + LocationSummary::CallKind call_kind = instruction->IsClassFinal() + ? LocationSummary::kNoCall + : LocationSummary::kCallOnSlowPath; LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); - if (call_kind != LocationSummary::kCall) { - locations->SetInAt(0, Location::RequiresRegister()); - locations->SetInAt(1, Location::Any()); - // Note that TypeCheckSlowPathX86 uses this register too. - locations->SetOut(Location::RequiresRegister()); - } else { - InvokeRuntimeCallingConvention calling_convention; - locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); - locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); - locations->SetOut(Location::RegisterLocation(EAX)); - } + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::Any()); + // Note that TypeCheckSlowPathX86 uses this register too. + locations->SetOut(Location::RequiresRegister()); } void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) { @@ -5008,11 +4972,8 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) { Location cls = locations->InAt(1); Register out = locations->Out().AsRegister<Register>(); uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); - uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value(); - uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value(); - uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value(); - SlowPathCodeX86* slow_path = nullptr; NearLabel done, zero; + SlowPathCodeX86* slow_path = nullptr; // Return 0 if `obj` is null. // Avoid null check if we know obj is not null. @@ -5020,282 +4981,79 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) { __ testl(obj, obj); __ j(kEqual, &zero); } - - // In case of an interface check, we put the object class into the object register. - // This is safe, as the register is caller-save, and the object must be in another - // register if it survives the runtime call. 
- Register target = (instruction->GetTypeCheckKind() == TypeCheckKind::kInterfaceCheck) - ? obj - : out; - __ movl(target, Address(obj, class_offset)); - __ MaybeUnpoisonHeapReference(target); - - switch (instruction->GetTypeCheckKind()) { - case TypeCheckKind::kExactCheck: { - if (cls.IsRegister()) { - __ cmpl(out, cls.AsRegister<Register>()); - } else { - DCHECK(cls.IsStackSlot()) << cls; - __ cmpl(out, Address(ESP, cls.GetStackIndex())); - } - // Classes must be equal for the instanceof to succeed. - __ j(kNotEqual, &zero); - __ movl(out, Immediate(1)); - __ jmp(&done); - break; - } - case TypeCheckKind::kAbstractClassCheck: { - // If the class is abstract, we eagerly fetch the super class of the - // object to avoid doing a comparison we know will fail. - NearLabel loop; - __ Bind(&loop); - __ movl(out, Address(out, super_offset)); - __ MaybeUnpoisonHeapReference(out); - __ testl(out, out); - // If `out` is null, we use it for the result, and jump to `done`. - __ j(kEqual, &done); - if (cls.IsRegister()) { - __ cmpl(out, cls.AsRegister<Register>()); - } else { - DCHECK(cls.IsStackSlot()) << cls; - __ cmpl(out, Address(ESP, cls.GetStackIndex())); - } - __ j(kNotEqual, &loop); - __ movl(out, Immediate(1)); - if (zero.IsLinked()) { - __ jmp(&done); - } - break; - } - case TypeCheckKind::kClassHierarchyCheck: { - // Walk over the class hierarchy to find a match. - NearLabel loop, success; - __ Bind(&loop); - if (cls.IsRegister()) { - __ cmpl(out, cls.AsRegister<Register>()); - } else { - DCHECK(cls.IsStackSlot()) << cls; - __ cmpl(out, Address(ESP, cls.GetStackIndex())); - } - __ j(kEqual, &success); - __ movl(out, Address(out, super_offset)); - __ MaybeUnpoisonHeapReference(out); - __ testl(out, out); - __ j(kNotEqual, &loop); - // If `out` is null, we use it for the result, and jump to `done`. - __ jmp(&done); - __ Bind(&success); - __ movl(out, Immediate(1)); - if (zero.IsLinked()) { - __ jmp(&done); - } - break; - } - case TypeCheckKind::kArrayObjectCheck: { - // Just need to check that the object's class is a non primitive array. - __ movl(out, Address(out, component_offset)); - __ MaybeUnpoisonHeapReference(out); - __ testl(out, out); - // If `out` is null, we use it for the result, and jump to `done`. - __ j(kEqual, &done); - __ cmpw(Address(out, primitive_offset), Immediate(Primitive::kPrimNot)); - __ j(kNotEqual, &zero); - __ movl(out, Immediate(1)); - __ jmp(&done); - break; - } - case TypeCheckKind::kArrayCheck: { - if (cls.IsRegister()) { - __ cmpl(out, cls.AsRegister<Register>()); - } else { - DCHECK(cls.IsStackSlot()) << cls; - __ cmpl(out, Address(ESP, cls.GetStackIndex())); - } - DCHECK(locations->OnlyCallsOnSlowPath()); - slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86( - instruction, /* is_fatal */ false); - codegen_->AddSlowPath(slow_path); - __ j(kNotEqual, slow_path->GetEntryLabel()); - __ movl(out, Immediate(1)); - if (zero.IsLinked()) { - __ jmp(&done); - } - break; - } - - case TypeCheckKind::kInterfaceCheck: - default: { - codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial), - instruction, - instruction->GetDexPc(), - nullptr); - if (zero.IsLinked()) { - __ jmp(&done); - } - break; - } + // Compare the class of `obj` with `cls`. 
+ __ movl(out, Address(obj, class_offset)); + __ MaybeUnpoisonHeapReference(out); + if (cls.IsRegister()) { + __ cmpl(out, cls.AsRegister<Register>()); + } else { + DCHECK(cls.IsStackSlot()) << cls; + __ cmpl(out, Address(ESP, cls.GetStackIndex())); } - if (zero.IsLinked()) { - __ Bind(&zero); - __ xorl(out, out); + if (instruction->IsClassFinal()) { + // Classes must be equal for the instanceof to succeed. + __ j(kNotEqual, &zero); + __ movl(out, Immediate(1)); + __ jmp(&done); + } else { + // If the classes are not equal, we go into a slow path. + DCHECK(locations->OnlyCallsOnSlowPath()); + slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86(instruction); + codegen_->AddSlowPath(slow_path); + __ j(kNotEqual, slow_path->GetEntryLabel()); + __ movl(out, Immediate(1)); + __ jmp(&done); } - if (done.IsLinked()) { - __ Bind(&done); + if (instruction->MustDoNullCheck() || instruction->IsClassFinal()) { + __ Bind(&zero); + __ movl(out, Immediate(0)); } if (slow_path != nullptr) { __ Bind(slow_path->GetExitLabel()); } + __ Bind(&done); } void LocationsBuilderX86::VisitCheckCast(HCheckCast* instruction) { - LocationSummary::CallKind call_kind = LocationSummary::kNoCall; - bool throws_into_catch = instruction->CanThrowIntoCatchBlock(); - - switch (instruction->GetTypeCheckKind()) { - case TypeCheckKind::kExactCheck: - case TypeCheckKind::kAbstractClassCheck: - case TypeCheckKind::kClassHierarchyCheck: - case TypeCheckKind::kArrayObjectCheck: - call_kind = throws_into_catch - ? LocationSummary::kCallOnSlowPath - : LocationSummary::kNoCall; - break; - case TypeCheckKind::kInterfaceCheck: - call_kind = LocationSummary::kCall; - break; - case TypeCheckKind::kArrayCheck: - call_kind = LocationSummary::kCallOnSlowPath; - break; - } - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary( - instruction, call_kind); - if (call_kind != LocationSummary::kCall) { - locations->SetInAt(0, Location::RequiresRegister()); - locations->SetInAt(1, Location::Any()); - // Note that TypeCheckSlowPathX86 uses this register too. - locations->AddTemp(Location::RequiresRegister()); - } else { - InvokeRuntimeCallingConvention calling_convention; - locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); - locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); - } + instruction, LocationSummary::kCallOnSlowPath); + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::Any()); + // Note that TypeCheckSlowPathX86 uses this register too. + locations->AddTemp(Location::RequiresRegister()); } void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) { LocationSummary* locations = instruction->GetLocations(); Register obj = locations->InAt(0).AsRegister<Register>(); Location cls = locations->InAt(1); - Register temp = locations->WillCall() - ? 
kNoRegister - : locations->GetTemp(0).AsRegister<Register>(); - + Register temp = locations->GetTemp(0).AsRegister<Register>(); uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); - uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value(); - uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value(); - uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value(); - SlowPathCodeX86* slow_path = nullptr; - - if (!locations->WillCall()) { - slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86( - instruction, !locations->CanCall()); - codegen_->AddSlowPath(slow_path); - } + SlowPathCodeX86* slow_path = + new (GetGraph()->GetArena()) TypeCheckSlowPathX86(instruction); + codegen_->AddSlowPath(slow_path); - NearLabel done, abstract_entry; // Avoid null check if we know obj is not null. if (instruction->MustDoNullCheck()) { __ testl(obj, obj); - __ j(kEqual, &done); + __ j(kEqual, slow_path->GetExitLabel()); } - - if (locations->WillCall()) { - __ movl(obj, Address(obj, class_offset)); - __ MaybeUnpoisonHeapReference(obj); + // Compare the class of `obj` with `cls`. + __ movl(temp, Address(obj, class_offset)); + __ MaybeUnpoisonHeapReference(temp); + if (cls.IsRegister()) { + __ cmpl(temp, cls.AsRegister<Register>()); } else { - __ movl(temp, Address(obj, class_offset)); - __ MaybeUnpoisonHeapReference(temp); - } - - switch (instruction->GetTypeCheckKind()) { - case TypeCheckKind::kExactCheck: - case TypeCheckKind::kArrayCheck: { - if (cls.IsRegister()) { - __ cmpl(temp, cls.AsRegister<Register>()); - } else { - DCHECK(cls.IsStackSlot()) << cls; - __ cmpl(temp, Address(ESP, cls.GetStackIndex())); - } - // Jump to slow path for throwing the exception or doing a - // more involved array check. - __ j(kNotEqual, slow_path->GetEntryLabel()); - break; - } - case TypeCheckKind::kAbstractClassCheck: { - // If the class is abstract, we eagerly fetch the super class of the - // object to avoid doing a comparison we know will fail. - NearLabel loop, success; - __ Bind(&loop); - __ movl(temp, Address(temp, super_offset)); - __ MaybeUnpoisonHeapReference(temp); - __ testl(temp, temp); - // Jump to the slow path to throw the exception. - __ j(kEqual, slow_path->GetEntryLabel()); - if (cls.IsRegister()) { - __ cmpl(temp, cls.AsRegister<Register>()); - } else { - DCHECK(cls.IsStackSlot()) << cls; - __ cmpl(temp, Address(ESP, cls.GetStackIndex())); - } - __ j(kNotEqual, &loop); - break; - } - case TypeCheckKind::kClassHierarchyCheck: { - // Walk over the class hierarchy to find a match. - NearLabel loop, success; - __ Bind(&loop); - if (cls.IsRegister()) { - __ cmpl(temp, cls.AsRegister<Register>()); - } else { - DCHECK(cls.IsStackSlot()) << cls; - __ cmpl(temp, Address(ESP, cls.GetStackIndex())); - } - __ j(kEqual, &success); - __ movl(temp, Address(temp, super_offset)); - __ MaybeUnpoisonHeapReference(temp); - __ testl(temp, temp); - __ j(kNotEqual, &loop); - // Jump to the slow path to throw the exception. - __ jmp(slow_path->GetEntryLabel()); - __ Bind(&success); - break; - } - case TypeCheckKind::kArrayObjectCheck: { - // Just need to check that the object's class is a non primitive array. 
- __ movl(temp, Address(temp, component_offset)); - __ MaybeUnpoisonHeapReference(temp); - __ testl(temp, temp); - __ j(kEqual, slow_path->GetEntryLabel()); - __ cmpw(Address(temp, primitive_offset), Immediate(Primitive::kPrimNot)); - __ j(kNotEqual, slow_path->GetEntryLabel()); - break; - } - case TypeCheckKind::kInterfaceCheck: - default: - codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), - instruction, - instruction->GetDexPc(), - nullptr); - break; - } - __ Bind(&done); - - if (slow_path != nullptr) { - __ Bind(slow_path->GetExitLabel()); + DCHECK(cls.IsStackSlot()) << cls; + __ cmpl(temp, Address(ESP, cls.GetStackIndex())); } + // The checkcast succeeds if the classes are equal (fast path). + // Otherwise, we need to go into the slow path to check the types. + __ j(kNotEqual, slow_path->GetEntryLabel()); + __ Bind(slow_path->GetExitLabel()); } void LocationsBuilderX86::VisitMonitorOperation(HMonitorOperation* instruction) { diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc index 51029019a1..134bfedb15 100644 --- a/compiler/optimizing/code_generator_x86_64.cc +++ b/compiler/optimizing/code_generator_x86_64.cc @@ -300,8 +300,8 @@ class LoadStringSlowPathX86_64 : public SlowPathCodeX86_64 { class TypeCheckSlowPathX86_64 : public SlowPathCodeX86_64 { public: - TypeCheckSlowPathX86_64(HInstruction* instruction, bool is_fatal) - : instruction_(instruction), is_fatal_(is_fatal) {} + explicit TypeCheckSlowPathX86_64(HInstruction* instruction) + : instruction_(instruction) {} void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { LocationSummary* locations = instruction_->GetLocations(); @@ -313,19 +313,7 @@ class TypeCheckSlowPathX86_64 : public SlowPathCodeX86_64 { CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen); __ Bind(GetEntryLabel()); - - if (instruction_->IsCheckCast()) { - // The codegen for the instruction overwrites `temp`, so put it back in place. - CpuRegister obj = locations->InAt(0).AsRegister<CpuRegister>(); - CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>(); - uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); - __ movl(temp, Address(obj, class_offset)); - __ MaybeUnpoisonHeapReference(temp); - } - - if (!is_fatal_) { - SaveLiveRegisters(codegen, locations); - } + SaveLiveRegisters(codegen, locations); // We're moving two locations to locations that could overlap, so we need a parallel // move resolver. 
@@ -351,23 +339,18 @@ class TypeCheckSlowPathX86_64 : public SlowPathCodeX86_64 { this); } - if (!is_fatal_) { - if (instruction_->IsInstanceOf()) { - x64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX)); - } - - RestoreLiveRegisters(codegen, locations); - __ jmp(GetExitLabel()); + if (instruction_->IsInstanceOf()) { + x64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX)); } + + RestoreLiveRegisters(codegen, locations); + __ jmp(GetExitLabel()); } const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathX86_64"; } - bool IsFatal() const OVERRIDE { return is_fatal_; } - private: HInstruction* const instruction_; - const bool is_fatal_; DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathX86_64); }; @@ -4701,33 +4684,14 @@ void InstructionCodeGeneratorX86_64::VisitThrow(HThrow* instruction) { } void LocationsBuilderX86_64::VisitInstanceOf(HInstanceOf* instruction) { - LocationSummary::CallKind call_kind = LocationSummary::kNoCall; - switch (instruction->GetTypeCheckKind()) { - case TypeCheckKind::kExactCheck: - case TypeCheckKind::kAbstractClassCheck: - case TypeCheckKind::kClassHierarchyCheck: - case TypeCheckKind::kArrayObjectCheck: - call_kind = LocationSummary::kNoCall; - break; - case TypeCheckKind::kInterfaceCheck: - call_kind = LocationSummary::kCall; - break; - case TypeCheckKind::kArrayCheck: - call_kind = LocationSummary::kCallOnSlowPath; - break; - } + LocationSummary::CallKind call_kind = instruction->IsClassFinal() + ? LocationSummary::kNoCall + : LocationSummary::kCallOnSlowPath; LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); - if (call_kind != LocationSummary::kCall) { - locations->SetInAt(0, Location::RequiresRegister()); - locations->SetInAt(1, Location::Any()); - // Note that TypeCheckSlowPathX86_64 uses this register too. - locations->SetOut(Location::RequiresRegister()); - } else { - InvokeRuntimeCallingConvention calling_convention; - locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); - locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); - locations->SetOut(Location::RegisterLocation(RAX)); - } + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::Any()); + // Note that TypeCheckSlowPathX86_64 uses this register too. + locations->SetOut(Location::RequiresRegister()); } void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) { @@ -4736,11 +4700,8 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) { Location cls = locations->InAt(1); CpuRegister out = locations->Out().AsRegister<CpuRegister>(); uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); - uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value(); - uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value(); - uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value(); - SlowPathCodeX86_64* slow_path = nullptr; NearLabel done, zero; + SlowPathCodeX86_64* slow_path = nullptr; // Return 0 if `obj` is null. // Avoid null check if we know obj is not null. @@ -4748,282 +4709,78 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) { __ testl(obj, obj); __ j(kEqual, &zero); } - - // In case of an interface check, we put the object class into the object register. 
- // This is safe, as the register is caller-save, and the object must be in another - // register if it survives the runtime call. - CpuRegister target = (instruction->GetTypeCheckKind() == TypeCheckKind::kInterfaceCheck) - ? obj - : out; - __ movl(target, Address(obj, class_offset)); - __ MaybeUnpoisonHeapReference(target); - - switch (instruction->GetTypeCheckKind()) { - case TypeCheckKind::kExactCheck: { - if (cls.IsRegister()) { - __ cmpl(out, cls.AsRegister<CpuRegister>()); - } else { - DCHECK(cls.IsStackSlot()) << cls; - __ cmpl(out, Address(CpuRegister(RSP), cls.GetStackIndex())); - } - // Classes must be equal for the instanceof to succeed. - __ j(kNotEqual, &zero); - __ movl(out, Immediate(1)); - __ jmp(&done); - break; - } - case TypeCheckKind::kAbstractClassCheck: { - // If the class is abstract, we eagerly fetch the super class of the - // object to avoid doing a comparison we know will fail. - NearLabel loop, success; - __ Bind(&loop); - __ movl(out, Address(out, super_offset)); - __ MaybeUnpoisonHeapReference(out); - __ testl(out, out); - // If `out` is null, we use it for the result, and jump to `done`. - __ j(kEqual, &done); - if (cls.IsRegister()) { - __ cmpl(out, cls.AsRegister<CpuRegister>()); - } else { - DCHECK(cls.IsStackSlot()) << cls; - __ cmpl(out, Address(CpuRegister(RSP), cls.GetStackIndex())); - } - __ j(kNotEqual, &loop); - __ movl(out, Immediate(1)); - if (zero.IsLinked()) { - __ jmp(&done); - } - break; - } - case TypeCheckKind::kClassHierarchyCheck: { - // Walk over the class hierarchy to find a match. - NearLabel loop, success; - __ Bind(&loop); - if (cls.IsRegister()) { - __ cmpl(out, cls.AsRegister<CpuRegister>()); - } else { - DCHECK(cls.IsStackSlot()) << cls; - __ cmpl(out, Address(CpuRegister(RSP), cls.GetStackIndex())); - } - __ j(kEqual, &success); - __ movl(out, Address(out, super_offset)); - __ MaybeUnpoisonHeapReference(out); - __ testl(out, out); - __ j(kNotEqual, &loop); - // If `out` is null, we use it for the result, and jump to `done`. - __ jmp(&done); - __ Bind(&success); - __ movl(out, Immediate(1)); - if (zero.IsLinked()) { - __ jmp(&done); - } - break; - } - case TypeCheckKind::kArrayObjectCheck: { - // Just need to check that the object's class is a non primitive array. - __ movl(out, Address(out, component_offset)); - __ MaybeUnpoisonHeapReference(out); - __ testl(out, out); - // If `out` is null, we use it for the result, and jump to `done`. - __ j(kEqual, &done); - __ cmpw(Address(out, primitive_offset), Immediate(Primitive::kPrimNot)); - __ j(kNotEqual, &zero); - __ movl(out, Immediate(1)); - __ jmp(&done); - break; - } - case TypeCheckKind::kArrayCheck: { - if (cls.IsRegister()) { - __ cmpl(out, cls.AsRegister<CpuRegister>()); - } else { - DCHECK(cls.IsStackSlot()) << cls; - __ cmpl(out, Address(CpuRegister(RSP), cls.GetStackIndex())); - } - DCHECK(locations->OnlyCallsOnSlowPath()); - slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64( - instruction, /* is_fatal */ false); - codegen_->AddSlowPath(slow_path); - __ j(kNotEqual, slow_path->GetEntryLabel()); - __ movl(out, Immediate(1)); - if (zero.IsLinked()) { - __ jmp(&done); - } - break; - } - - case TypeCheckKind::kInterfaceCheck: - default: { - codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial), - instruction, - instruction->GetDexPc(), - nullptr); - if (zero.IsLinked()) { - __ jmp(&done); - } - break; - } + // Compare the class of `obj` with `cls`. 
+  __ movl(out, Address(obj, class_offset));
+  __ MaybeUnpoisonHeapReference(out);
+  if (cls.IsRegister()) {
+    __ cmpl(out, cls.AsRegister<CpuRegister>());
+  } else {
+    DCHECK(cls.IsStackSlot()) << cls;
+    __ cmpl(out, Address(CpuRegister(RSP), cls.GetStackIndex()));
+  }
+  if (instruction->IsClassFinal()) {
+    // Classes must be equal for the instanceof to succeed.
+    __ j(kNotEqual, &zero);
+    __ movl(out, Immediate(1));
+    __ jmp(&done);
+  } else {
+    // If the classes are not equal, we go into a slow path.
+    DCHECK(locations->OnlyCallsOnSlowPath());
+    slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(instruction);
+    codegen_->AddSlowPath(slow_path);
+    __ j(kNotEqual, slow_path->GetEntryLabel());
+    __ movl(out, Immediate(1));
+    __ jmp(&done);
   }
-  if (zero.IsLinked()) {
+  if (instruction->MustDoNullCheck() || instruction->IsClassFinal()) {
     __ Bind(&zero);
-    __ xorl(out, out);
-  }
-
-  if (done.IsLinked()) {
-    __ Bind(&done);
+    __ movl(out, Immediate(0));
   }
 
   if (slow_path != nullptr) {
     __ Bind(slow_path->GetExitLabel());
   }
+  __ Bind(&done);
 }
 
 void LocationsBuilderX86_64::VisitCheckCast(HCheckCast* instruction) {
-  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
-  bool throws_into_catch = instruction->CanThrowIntoCatchBlock();
-
-  switch (instruction->GetTypeCheckKind()) {
-    case TypeCheckKind::kExactCheck:
-    case TypeCheckKind::kAbstractClassCheck:
-    case TypeCheckKind::kClassHierarchyCheck:
-    case TypeCheckKind::kArrayObjectCheck:
-      call_kind = throws_into_catch
-          ? LocationSummary::kCallOnSlowPath
-          : LocationSummary::kNoCall;
-      break;
-    case TypeCheckKind::kInterfaceCheck:
-      call_kind = LocationSummary::kCall;
-      break;
-    case TypeCheckKind::kArrayCheck:
-      call_kind = LocationSummary::kCallOnSlowPath;
-      break;
-  }
   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
-      instruction, call_kind);
-  if (call_kind != LocationSummary::kCall) {
-    locations->SetInAt(0, Location::RequiresRegister());
-    locations->SetInAt(1, Location::Any());
-    // Note that TypeCheckSlowPathX86_64 uses this register too.
-    locations->AddTemp(Location::RequiresRegister());
-  } else {
-    InvokeRuntimeCallingConvention calling_convention;
-    locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-    locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
-  }
+      instruction, LocationSummary::kCallOnSlowPath);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::Any());
+  // Note that TypeCheckSlowPathX86_64 uses this register too.
+  locations->AddTemp(Location::RequiresRegister());
 }
 
 void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) {
   LocationSummary* locations = instruction->GetLocations();
   CpuRegister obj = locations->InAt(0).AsRegister<CpuRegister>();
   Location cls = locations->InAt(1);
-  CpuRegister temp = locations->WillCall()
-      ? CpuRegister(kNoRegister)
-      : locations->GetTemp(0).AsRegister<CpuRegister>();
-
+  CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>();
   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
-  uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
-  uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
-  uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
-  SlowPathCodeX86_64* slow_path = nullptr;
-
-  if (!locations->WillCall()) {
-    slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(
-        instruction, !locations->CanCall());
-    codegen_->AddSlowPath(slow_path);
-  }
+  SlowPathCodeX86_64* slow_path =
+      new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(instruction);
+  codegen_->AddSlowPath(slow_path);
 
-  NearLabel done;
   // Avoid null check if we know obj is not null.
   if (instruction->MustDoNullCheck()) {
     __ testl(obj, obj);
-    __ j(kEqual, &done);
+    __ j(kEqual, slow_path->GetExitLabel());
   }
-
-  if (locations->WillCall()) {
-    __ movl(obj, Address(obj, class_offset));
-    __ MaybeUnpoisonHeapReference(obj);
+  // Compare the class of `obj` with `cls`.
+  __ movl(temp, Address(obj, class_offset));
+  __ MaybeUnpoisonHeapReference(temp);
+  if (cls.IsRegister()) {
+    __ cmpl(temp, cls.AsRegister<CpuRegister>());
   } else {
-    __ movl(temp, Address(obj, class_offset));
-    __ MaybeUnpoisonHeapReference(temp);
-  }
-
-  switch (instruction->GetTypeCheckKind()) {
-    case TypeCheckKind::kExactCheck:
-    case TypeCheckKind::kArrayCheck: {
-      if (cls.IsRegister()) {
-        __ cmpl(temp, cls.AsRegister<CpuRegister>());
-      } else {
-        DCHECK(cls.IsStackSlot()) << cls;
-        __ cmpl(temp, Address(CpuRegister(RSP), cls.GetStackIndex()));
-      }
-      // Jump to slow path for throwing the exception or doing a
-      // more involved array check.
-      __ j(kNotEqual, slow_path->GetEntryLabel());
-      break;
-    }
-    case TypeCheckKind::kAbstractClassCheck: {
-      // If the class is abstract, we eagerly fetch the super class of the
-      // object to avoid doing a comparison we know will fail.
-      NearLabel loop;
-      __ Bind(&loop);
-      __ movl(temp, Address(temp, super_offset));
-      __ MaybeUnpoisonHeapReference(temp);
-      __ testl(temp, temp);
-      // Jump to the slow path to throw the exception.
-      __ j(kEqual, slow_path->GetEntryLabel());
-      if (cls.IsRegister()) {
-        __ cmpl(temp, cls.AsRegister<CpuRegister>());
-      } else {
-        DCHECK(cls.IsStackSlot()) << cls;
-        __ cmpl(temp, Address(CpuRegister(RSP), cls.GetStackIndex()));
-      }
-      __ j(kNotEqual, &loop);
-      break;
-    }
-    case TypeCheckKind::kClassHierarchyCheck: {
-      // Walk over the class hierarchy to find a match.
-      NearLabel loop, success;
-      __ Bind(&loop);
-      if (cls.IsRegister()) {
-        __ cmpl(temp, cls.AsRegister<CpuRegister>());
-      } else {
-        DCHECK(cls.IsStackSlot()) << cls;
-        __ cmpl(temp, Address(CpuRegister(RSP), cls.GetStackIndex()));
-      }
-      __ j(kEqual, &success);
-      __ movl(temp, Address(temp, super_offset));
-      __ MaybeUnpoisonHeapReference(temp);
-      __ testl(temp, temp);
-      __ j(kNotEqual, &loop);
-      // Jump to the slow path to throw the exception.
-      __ jmp(slow_path->GetEntryLabel());
-      __ Bind(&success);
-      break;
-    }
-    case TypeCheckKind::kArrayObjectCheck: {
-      // Just need to check that the object's class is a non primitive array.
-      __ movl(temp, Address(temp, component_offset));
-      __ MaybeUnpoisonHeapReference(temp);
-      __ testl(temp, temp);
-      __ j(kEqual, slow_path->GetEntryLabel());
-      __ cmpw(Address(temp, primitive_offset), Immediate(Primitive::kPrimNot));
-      __ j(kNotEqual, slow_path->GetEntryLabel());
-      break;
-    }
-    case TypeCheckKind::kInterfaceCheck:
-    default:
-      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
-                              instruction,
-                              instruction->GetDexPc(),
-                              nullptr);
-      break;
-  }
-  __ Bind(&done);
-
-  if (slow_path != nullptr) {
-    __ Bind(slow_path->GetExitLabel());
+    DCHECK(cls.IsStackSlot()) << cls;
+    __ cmpl(temp, Address(CpuRegister(RSP), cls.GetStackIndex()));
   }
+  // The checkcast succeeds if the classes are equal (fast path).
+  // Otherwise, we need to go into the slow path to check the types.
+  __ j(kNotEqual, slow_path->GetEntryLabel());
+  __ Bind(slow_path->GetExitLabel());
 }
 
 void LocationsBuilderX86_64::VisitMonitorOperation(HMonitorOperation* instruction) {
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 8dd31bef86..3c57180b6f 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -4719,29 +4719,16 @@ class HThrow : public HTemplateInstruction<1> {
   DISALLOW_COPY_AND_ASSIGN(HThrow);
 };
 
-/**
- * Implementation strategies for the code generator of a HInstanceOf
- * or `HCheckCast`.
- */
-enum class TypeCheckKind {
-  kExactCheck,           // Can do a single class compare.
-  kClassHierarchyCheck,  // Can just walk the super class chain.
-  kAbstractClassCheck,   // Can just walk the super class chain, starting one up.
-  kInterfaceCheck,       // No optimization yet when checking against an interface.
-  kArrayObjectCheck,     // Can just check if the array is not primitive.
-  kArrayCheck            // No optimization yet when checking against a generic array.
-};
-
 class HInstanceOf : public HExpression<2> {
  public:
   HInstanceOf(HInstruction* object,
               HLoadClass* constant,
-              TypeCheckKind check_kind,
+              bool class_is_final,
               uint32_t dex_pc)
       : HExpression(Primitive::kPrimBoolean,
-                    SideEffectsForArchRuntimeCalls(check_kind),
+                    SideEffectsForArchRuntimeCalls(class_is_final),
                     dex_pc),
-        check_kind_(check_kind),
+        class_is_final_(class_is_final),
         must_do_null_check_(true) {
     SetRawInputAt(0, object);
     SetRawInputAt(1, constant);
@@ -4757,25 +4744,20 @@ class HInstanceOf : public HExpression<2> {
     return false;
   }
 
-  bool IsExactCheck() const { return check_kind_ == TypeCheckKind::kExactCheck; }
-
-  TypeCheckKind GetTypeCheckKind() const { return check_kind_; }
+  bool IsClassFinal() const { return class_is_final_; }
 
   // Used only in code generation.
   bool MustDoNullCheck() const { return must_do_null_check_; }
   void ClearMustDoNullCheck() { must_do_null_check_ = false; }
 
-  static SideEffects SideEffectsForArchRuntimeCalls(TypeCheckKind check_kind) {
-    return (check_kind == TypeCheckKind::kExactCheck)
-        ? SideEffects::None()
-        // Mips currently does runtime calls for any other checks.
-        : SideEffects::CanTriggerGC();
+  static SideEffects SideEffectsForArchRuntimeCalls(bool class_is_final) {
+    return class_is_final ? SideEffects::None() : SideEffects::CanTriggerGC();
   }
 
   DECLARE_INSTRUCTION(InstanceOf);
 
  private:
-  const TypeCheckKind check_kind_;
+  const bool class_is_final_;
   bool must_do_null_check_;
 
   DISALLOW_COPY_AND_ASSIGN(HInstanceOf);
@@ -4831,10 +4813,10 @@ class HCheckCast : public HTemplateInstruction<2> {
 public:
   HCheckCast(HInstruction* object,
              HLoadClass* constant,
-             TypeCheckKind check_kind,
+             bool class_is_final,
              uint32_t dex_pc)
       : HTemplateInstruction(SideEffects::CanTriggerGC(), dex_pc),
-        check_kind_(check_kind),
+        class_is_final_(class_is_final),
         must_do_null_check_(true) {
     SetRawInputAt(0, object);
     SetRawInputAt(1, constant);
@@ -4855,14 +4837,14 @@ class HCheckCast : public HTemplateInstruction<2> {
   bool MustDoNullCheck() const { return must_do_null_check_; }
   void ClearMustDoNullCheck() { must_do_null_check_ = false; }
 
-  TypeCheckKind GetTypeCheckKind() const { return check_kind_; }
-  bool IsExactCheck() const { return check_kind_ == TypeCheckKind::kExactCheck; }
+
+  bool IsClassFinal() const { return class_is_final_; }
 
   DECLARE_INSTRUCTION(CheckCast);
 
  private:
-  const TypeCheckKind check_kind_;
+  const bool class_is_final_;
   bool must_do_null_check_;
 
   DISALLOW_COPY_AND_ASSIGN(HCheckCast);
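
Editor's note on the restored scheme: after this revert, HInstanceOf and HCheckCast carry a single `class_is_final` bit instead of the six-way TypeCheckKind, and the x86-64 back end emits one shape of code: load `obj->klass_`, compare it against the constant class, and either materialize the boolean inline (final classes, where equality fully decides the check) or branch to TypeCheckSlowPathX86_64 (non-final classes, where a subclass may still match). The sketch below models that decision outside the compiler. It is a minimal, self-contained C++ illustration, not ART code; `TypeCheckModel`, `SelectCallKind`, and `DescribeInstanceOfLowering` are names invented here, and the printed trace only mirrors the instructions visible in the `+` lines of the hunks above.

#include <iostream>

// Hypothetical stand-in for the state HInstanceOf keeps after this revert:
// one bit from IsClassFinal() and one from MustDoNullCheck().
struct TypeCheckModel {
  bool class_is_final;
  bool must_do_null_check;
};

enum class CallKind { kNoCall, kCallOnSlowPath };

// Mirrors the restored LocationsBuilderX86_64::VisitInstanceOf decision:
// a final class needs no runtime help, because `obj->klass_ == cls` fully
// decides the instanceof; any other class may still match via a subclass,
// so a slow path must stay reachable.
CallKind SelectCallKind(const TypeCheckModel& check) {
  return check.class_is_final ? CallKind::kNoCall : CallKind::kCallOnSlowPath;
}

// Prints the code shape the restored VisitInstanceOf generates; compare
// with the `+` lines in the x86-64 hunk above.
void DescribeInstanceOfLowering(const TypeCheckModel& check) {
  if (check.must_do_null_check) {
    std::cout << "  testl obj, obj ; jz zero        // null is never an instance\n";
  }
  std::cout << "  movl out, [obj + class_offset]  // load obj->klass_\n";
  std::cout << "  cmpl out, cls\n";
  if (check.class_is_final) {
    std::cout << "  jne zero                        // final: equality decides\n";
  } else {
    std::cout << "  jne TypeCheckSlowPath           // a subclass may still match\n";
  }
  std::cout << "  movl out, 1                     // fast path: exact match\n";
}

int main() {
  const TypeCheckModel final_cls{true, true};
  const TypeCheckModel open_cls{false, true};

  std::cout << "instanceof against a final class:\n";
  DescribeInstanceOfLowering(final_cls);
  std::cout << (SelectCallKind(final_cls) == CallKind::kNoCall
                    ? "-> kNoCall\n\n" : "-> kCallOnSlowPath\n\n");

  std::cout << "instanceof against a non-final class:\n";
  DescribeInstanceOfLowering(open_cls);
  std::cout << (SelectCallKind(open_cls) == CallKind::kNoCall
                    ? "-> kNoCall\n" : "-> kCallOnSlowPath\n");
  return 0;
}

The trade-off the revert undoes is visible in the deleted TypeCheckKind comments: the optimization gave each class shape a cheaper inline strategy (a single compare for exact checks, a super-class walk for abstract and hierarchy checks, a component-type test for object arrays) at the cost of six code paths per architecture, while the restored boolean keeps one inline fast path and routes every other case through the slow path or the pInstanceofNonTrivial/pCheckCast runtime entry points.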