Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/builder.cc               |  89
-rw-r--r--  compiler/optimizing/builder.h                |   8
-rw-r--r--  compiler/optimizing/code_generator_arm.cc    |  63
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc  |   3
-rw-r--r--  compiler/optimizing/code_generator_x86.cc    |  73
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc |  75
-rw-r--r--  compiler/optimizing/nodes.h                  |  60
7 files changed, 298 insertions, 73 deletions
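This change splits the old HTypeCheck node into separate HInstanceOf and HCheckCast instructions, routes invoke-super through the sharpening path in the builder, and implements both type checks on ARM, x86 and x86-64 (arm64 still lists them as unimplemented). For orientation, here is a minimal C++ sketch, with hypothetical types and helper names rather than ART's, of the fast/slow-path shape each backend below emits for the two instructions:

  #include <stdexcept>

  struct Class { const Class* super; };
  struct Object { const Class* klass; };

  // Stand-in for the pInstanceofNonTrivial entrypoint: walk the superclass
  // chain. (The real runtime also handles interfaces and arrays.)
  static bool InstanceofNonTrivial(const Class* cls, const Class* k) {
    for (; k != nullptr; k = k->super) {
      if (k == cls) return true;
    }
    return false;
  }

  // Shape of the code VisitInstanceOf emits: null check, one class-pointer
  // compare, and a runtime call only when the class is not final.
  bool EmittedInstanceOf(const Object* obj, const Class* cls, bool class_is_final) {
    if (obj == nullptr) return false;        // null is an instance of nothing
    if (obj->klass == cls) return true;      // fast path: exact class match
    if (class_is_final) return false;        // a final class has no subclasses
    return InstanceofNonTrivial(cls, obj->klass);  // slow path
  }

  // Shape of the code VisitCheckCast emits: null always passes; a failed
  // compare falls through to the slow path, which calls pCheckCast and throws.
  void EmittedCheckCast(const Object* obj, const Class* cls) {
    if (obj == nullptr) return;              // check-cast of null succeeds
    if (obj->klass == cls) return;           // fast path
    if (!InstanceofNonTrivial(cls, obj->klass)) {
      throw std::runtime_error("ClassCastException");  // models pCheckCast
    }
  }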
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 8418ab0a7e..76a2be927e 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -374,13 +374,12 @@ bool HGraphBuilder::BuildInvoke(const Instruction& instruction,
   const size_t number_of_arguments = strlen(descriptor) - (is_instance_call ? 0 : 1);
 
   HInvoke* invoke = nullptr;
-  if (invoke_type == kVirtual || invoke_type == kInterface) {
+  if (invoke_type == kVirtual || invoke_type == kInterface || invoke_type == kSuper) {
     MethodReference target_method(dex_file_, method_idx);
     uintptr_t direct_code;
     uintptr_t direct_method;
     int table_index;
     InvokeType optimized_invoke_type = invoke_type;
-    // TODO: Add devirtualization support.
     compiler_driver_->ComputeInvokeInfo(dex_compilation_unit_, dex_offset, true, true,
                                         &optimized_invoke_type, &target_method, &table_index,
                                         &direct_code, &direct_method);
@@ -388,15 +387,21 @@ bool HGraphBuilder::BuildInvoke(const Instruction& instruction,
       return false;
     }
 
-    if (invoke_type == kVirtual) {
+    if (optimized_invoke_type == kVirtual) {
       invoke = new (arena_) HInvokeVirtual(
           arena_, number_of_arguments, return_type, dex_offset, table_index);
-    } else {
-      DCHECK_EQ(invoke_type, kInterface);
+    } else if (optimized_invoke_type == kInterface) {
       invoke = new (arena_) HInvokeInterface(
           arena_, number_of_arguments, return_type, dex_offset, method_idx, table_index);
+    } else if (optimized_invoke_type == kDirect) {
+      // For this compiler, sharpening only works if we compile PIC.
+      DCHECK(compiler_driver_->GetCompilerOptions().GetCompilePic());
+      // Treat invoke-direct like static calls for now.
+      invoke = new (arena_) HInvokeStatic(
+          arena_, number_of_arguments, return_type, dex_offset, target_method.dex_method_index);
     }
   } else {
+    DCHECK(invoke_type == kDirect || invoke_type == kStatic);
     // Treat invoke-direct like static calls for now.
     invoke = new (arena_) HInvokeStatic(
         arena_, number_of_arguments, return_type, dex_offset, method_idx);
@@ -704,6 +709,38 @@ void HGraphBuilder::BuildFillWideArrayData(HInstruction* object,
   }
 }
 
+bool HGraphBuilder::BuildTypeCheck(const Instruction& instruction,
+                                   uint8_t destination,
+                                   uint8_t reference,
+                                   uint16_t type_index,
+                                   uint32_t dex_offset) {
+  bool type_known_final;
+  bool type_known_abstract;
+  bool is_referrers_class;
+  bool can_access = compiler_driver_->CanAccessTypeWithoutChecks(
+      dex_compilation_unit_->GetDexMethodIndex(), *dex_file_, type_index,
+      &type_known_final, &type_known_abstract, &is_referrers_class);
+  if (!can_access) {
+    return false;
+  }
+  HInstruction* object = LoadLocal(reference, Primitive::kPrimNot);
+  HLoadClass* cls = new (arena_) HLoadClass(type_index, is_referrers_class, dex_offset);
+  current_block_->AddInstruction(cls);
+  // The class needs a temporary before being used by the type check.
+  Temporaries temps(graph_, 1);
+  temps.Add(cls);
+  if (instruction.Opcode() == Instruction::INSTANCE_OF) {
+    current_block_->AddInstruction(
+        new (arena_) HInstanceOf(object, cls, type_known_final, dex_offset));
+    UpdateLocal(destination, current_block_->GetLastInstruction());
+  } else {
+    DCHECK_EQ(instruction.Opcode(), Instruction::CHECK_CAST);
+    current_block_->AddInstruction(
+        new (arena_) HCheckCast(object, cls, type_known_final, dex_offset));
+  }
+  return true;
+}
+
 void HGraphBuilder::PotentiallyAddSuspendCheck(int32_t target_offset, uint32_t dex_offset) {
   if (target_offset <= 0) {
     // Unconditionnally add a suspend check to backward branches. We can remove
@@ -858,10 +895,11 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
       break;
     }
 
-    case Instruction::INVOKE_STATIC:
     case Instruction::INVOKE_DIRECT:
-    case Instruction::INVOKE_VIRTUAL:
-    case Instruction::INVOKE_INTERFACE: {
+    case Instruction::INVOKE_INTERFACE:
+    case Instruction::INVOKE_STATIC:
+    case Instruction::INVOKE_SUPER:
+    case Instruction::INVOKE_VIRTUAL: {
       uint32_t method_idx = instruction.VRegB_35c();
       uint32_t number_of_vreg_arguments = instruction.VRegA_35c();
       uint32_t args[5];
@@ -873,10 +911,11 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
       break;
     }
 
-    case Instruction::INVOKE_STATIC_RANGE:
     case Instruction::INVOKE_DIRECT_RANGE:
-    case Instruction::INVOKE_VIRTUAL_RANGE:
-    case Instruction::INVOKE_INTERFACE_RANGE: {
+    case Instruction::INVOKE_INTERFACE_RANGE:
+    case Instruction::INVOKE_STATIC_RANGE:
+    case Instruction::INVOKE_SUPER_RANGE:
+    case Instruction::INVOKE_VIRTUAL_RANGE: {
       uint32_t method_idx = instruction.VRegB_3rc();
       uint32_t number_of_vreg_arguments = instruction.VRegA_3rc();
       uint32_t register_index = instruction.VRegC();
@@ -1292,25 +1331,21 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
     }
 
     case Instruction::INSTANCE_OF: {
+      uint8_t destination = instruction.VRegA_22c();
+      uint8_t reference = instruction.VRegB_22c();
       uint16_t type_index = instruction.VRegC_22c();
-      bool type_known_final;
-      bool type_known_abstract;
-      bool is_referrers_class;
-      bool can_access = compiler_driver_->CanAccessTypeWithoutChecks(
-          dex_compilation_unit_->GetDexMethodIndex(), *dex_file_, type_index,
-          &type_known_final, &type_known_abstract, &is_referrers_class);
-      if (!can_access) {
+      if (!BuildTypeCheck(instruction, destination, reference, type_index, dex_offset)) {
+        return false;
+      }
+      break;
+    }
+
+    case Instruction::CHECK_CAST: {
+      uint8_t reference = instruction.VRegA_21c();
+      uint16_t type_index = instruction.VRegB_21c();
+      if (!BuildTypeCheck(instruction, -1, reference, type_index, dex_offset)) {
         return false;
       }
-      HInstruction* object = LoadLocal(instruction.VRegB_22c(), Primitive::kPrimNot);
-      HLoadClass* cls = new (arena_) HLoadClass(type_index, is_referrers_class, dex_offset);
-      current_block_->AddInstruction(cls);
-      // The class needs a temporary before being used by the type check.
-      Temporaries temps(graph_, 1);
-      temps.Add(cls);
-      current_block_->AddInstruction(
-          new (arena_) HTypeCheck(object, cls, type_known_final, dex_offset));
-      UpdateLocal(instruction.VRegA_22c(), current_block_->GetLastInstruction());
       break;
     }
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 09c9a51260..9cf83055b2 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -173,6 +173,14 @@ class HGraphBuilder : public ValueObject {
                               uint32_t element_count,
                               uint32_t dex_offset);
 
+  // Builds a `HInstanceOf`, or a `HCheckCast` instruction.
+  // Returns whether we succeeded in building the instruction.
+  bool BuildTypeCheck(const Instruction& instruction,
+                      uint8_t destination,
+                      uint8_t reference,
+                      uint16_t type_index,
+                      uint32_t dex_offset);
+
   ArenaAllocator* const arena_;
 
   // A list of the size of the dex code holding block information for
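BuildInvoke now sends kSuper (alongside kVirtual and kInterface) through ComputeInvokeInfo, so the driver can sharpen a call to kDirect when the exact target is known; sharpened calls are modeled as HInvokeStatic for now and, in this compiler, require PIC. A small sketch of the resulting node selection, using hypothetical enum names that mirror the builder's branches:

  #include <cassert>

  enum InvokeType { kStatic, kDirect, kVirtual, kSuper, kInterface };
  enum NodeKind { kNodeInvokeStatic, kNodeInvokeVirtual, kNodeInvokeInterface };

  // Models the branches in BuildInvoke after ComputeInvokeInfo has possibly
  // rewritten the invoke type; in the patch, an unsharpened kSuper falls
  // through without creating a node.
  NodeKind SelectInvokeNode(InvokeType optimized_invoke_type) {
    switch (optimized_invoke_type) {
      case kVirtual:
        return kNodeInvokeVirtual;
      case kInterface:
        return kNodeInvokeInterface;
      case kDirect:  // sharpened call, treated like a static call for now
      case kStatic:
        return kNodeInvokeStatic;
      default:
        assert(false && "unsharpened kSuper: no node is built");
        return kNodeInvokeStatic;
    }
  }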
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 467c2a6c29..b0a56d58a3 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -269,13 +269,19 @@ class LoadStringSlowPathARM : public SlowPathCodeARM {
 
 class TypeCheckSlowPathARM : public SlowPathCodeARM {
  public:
-  explicit TypeCheckSlowPathARM(HTypeCheck* instruction, Location object_class)
+  TypeCheckSlowPathARM(HInstruction* instruction,
+                       Location class_to_check,
+                       Location object_class,
+                       uint32_t dex_pc)
       : instruction_(instruction),
-        object_class_(object_class) {}
+        class_to_check_(class_to_check),
+        object_class_(object_class),
+        dex_pc_(dex_pc) {}
 
   virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
-    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+    DCHECK(instruction_->IsCheckCast()
+           || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
 
     CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
     __ Bind(GetEntryLabel());
@@ -284,7 +290,7 @@ class TypeCheckSlowPathARM : public SlowPathCodeARM {
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
     InvokeRuntimeCallingConvention calling_convention;
-    MoveOperands move1(locations->InAt(1),
+    MoveOperands move1(class_to_check_,
                        Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                        nullptr);
     MoveOperands move2(object_class_,
@@ -295,17 +301,23 @@ class TypeCheckSlowPathARM : public SlowPathCodeARM {
     parallel_move.AddMove(&move2);
     arm_codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
 
-    arm_codegen->InvokeRuntime(
-        QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, instruction_->GetDexPc());
-    arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
+    if (instruction_->IsInstanceOf()) {
+      arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc_);
+      arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
+    } else {
+      DCHECK(instruction_->IsCheckCast());
+      arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_);
+    }
 
     codegen->RestoreLiveRegisters(locations);
     __ b(GetExitLabel());
   }
 
  private:
-  HTypeCheck* const instruction_;
+  HInstruction* const instruction_;
+  const Location class_to_check_;
   const Location object_class_;
+  uint32_t dex_pc_;
 
   DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM);
 };
@@ -2658,7 +2670,7 @@ void InstructionCodeGeneratorARM::VisitThrow(HThrow* instruction) {
       QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc());
 }
 
-void LocationsBuilderARM::VisitTypeCheck(HTypeCheck* instruction) {
+void LocationsBuilderARM::VisitInstanceOf(HInstanceOf* instruction) {
   LocationSummary::CallKind call_kind = instruction->IsClassFinal()
       ? LocationSummary::kNoCall
       : LocationSummary::kCallOnSlowPath;
@@ -2668,7 +2680,7 @@ void LocationsBuilderARM::VisitTypeCheck(HTypeCheck* instruction) {
   locations->SetOut(Location::RequiresRegister());
 }
 
-void InstructionCodeGeneratorARM::VisitTypeCheck(HTypeCheck* instruction) {
+void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
   LocationSummary* locations = instruction->GetLocations();
   Register obj = locations->InAt(0).As<Register>();
   Register cls = locations->InAt(1).As<Register>();
@@ -2693,7 +2705,7 @@ void InstructionCodeGeneratorARM::VisitTypeCheck(HTypeCheck* instruction) {
     // If the classes are not equal, we go into a slow path.
     DCHECK(locations->OnlyCallsOnSlowPath());
     slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
-        instruction, Location::RegisterLocation(out));
+        instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
     codegen_->AddSlowPath(slow_path);
     __ b(slow_path->GetEntryLabel(), NE);
     __ LoadImmediate(out, 1);
@@ -2707,5 +2719,34 @@ void InstructionCodeGeneratorARM::VisitTypeCheck(HTypeCheck* instruction) {
   __ Bind(&done);
 }
 
+void LocationsBuilderARM::VisitCheckCast(HCheckCast* instruction) {
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+      instruction, LocationSummary::kCallOnSlowPath);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RequiresRegister());
+  locations->AddTemp(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  Register obj = locations->InAt(0).As<Register>();
+  Register cls = locations->InAt(1).As<Register>();
+  Register temp = locations->GetTemp(0).As<Register>();
+  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+
+  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
+      instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
+  codegen_->AddSlowPath(slow_path);
+
+  // TODO: avoid this check if we know obj is not null.
+  __ cmp(obj, ShifterOperand(0));
+  __ b(slow_path->GetExitLabel(), EQ);
+  // Compare the class of `obj` with `cls`.
+  __ LoadFromOffset(kLoadWord, temp, obj, class_offset);
+  __ cmp(temp, ShifterOperand(cls));
+  __ b(slow_path->GetEntryLabel(), NE);
+  __ Bind(slow_path->GetExitLabel());
+}
+
 }  // namespace arm
 }  // namespace art
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 4dc836f412..ac65c1d03d 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -631,8 +631,10 @@ InstructionCodeGeneratorARM64::InstructionCodeGeneratorARM64(HGraph* graph,
         codegen_(codegen) {}
 
 #define FOR_EACH_UNIMPLEMENTED_INSTRUCTION(M)              \
+  M(CheckCast)                                             \
   M(ClinitCheck)                                           \
   M(DivZeroCheck)                                          \
+  M(InstanceOf)                                            \
   M(InvokeInterface)                                       \
   M(LoadClass)                                             \
   M(LoadException)                                         \
@@ -641,7 +643,6 @@ InstructionCodeGeneratorARM64::InstructionCodeGeneratorARM64(HGraph* graph,
   M(StaticFieldGet)                                        \
   M(StaticFieldSet)                                        \
   M(Throw)                                                 \
-  M(TypeCheck)                                             \
   M(TypeConversion)                                        \
 
 #define UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name) name##UnimplementedInstructionBreakCode
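Each TypeCheckSlowPath now forwards two values, class_to_check_ and object_class_, into the runtime calling convention through the parallel move resolver, since either source may already sit in a destination register. A toy illustration of why the moves cannot simply be emitted in order when they form a cycle:

  #include <cassert>

  // Desired parallel assignment: {r0 <- r1, r1 <- r0}. Sequential moves
  // clobber a source; the resolver reads all sources before writing any
  // destination, breaking the cycle with a scratch value.
  void ResolvedParallelMove(int& r0, int& r1) {
    int scratch = r0;  // save the value the first write would destroy
    r0 = r1;
    r1 = scratch;
  }

  int main() {
    int r0 = 1, r1 = 2;
    ResolvedParallelMove(r0, r1);
    assert(r0 == 2 && r1 == 1);  // both moves observed their original sources
    return 0;
  }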
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index d66180be32..aa609a629c 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -270,13 +270,19 @@ class LoadClassSlowPathX86 : public SlowPathCodeX86 {
 
 class TypeCheckSlowPathX86 : public SlowPathCodeX86 {
  public:
-  TypeCheckSlowPathX86(HTypeCheck* instruction, Location object_class)
+  TypeCheckSlowPathX86(HInstruction* instruction,
+                       Location class_to_check,
+                       Location object_class,
+                       uint32_t dex_pc)
       : instruction_(instruction),
-        object_class_(object_class) {}
+        class_to_check_(class_to_check),
+        object_class_(object_class),
+        dex_pc_(dex_pc) {}
 
   virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
-    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+    DCHECK(instruction_->IsCheckCast()
+           || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
 
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
     __ Bind(GetEntryLabel());
@@ -285,7 +291,7 @@ class TypeCheckSlowPathX86 : public SlowPathCodeX86 {
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
     InvokeRuntimeCallingConvention calling_convention;
-    MoveOperands move1(locations->InAt(1),
+    MoveOperands move1(class_to_check_,
                        Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                        nullptr);
     MoveOperands move2(object_class_,
@@ -296,17 +302,27 @@ class TypeCheckSlowPathX86 : public SlowPathCodeX86 {
     parallel_move.AddMove(&move2);
     x86_codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
 
-    __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInstanceofNonTrivial)));
-    codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
-    x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX));
+    if (instruction_->IsInstanceOf()) {
+      __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInstanceofNonTrivial)));
+    } else {
+      DCHECK(instruction_->IsCheckCast());
+      __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pCheckCast)));
+    }
+
+    codegen->RecordPcInfo(instruction_, dex_pc_);
+    if (instruction_->IsInstanceOf()) {
+      x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX));
+    }
 
     codegen->RestoreLiveRegisters(locations);
     __ jmp(GetExitLabel());
   }
 
  private:
-  HTypeCheck* const instruction_;
+  HInstruction* const instruction_;
+  const Location class_to_check_;
   const Location object_class_;
+  const uint32_t dex_pc_;
 
   DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathX86);
 };
@@ -2753,7 +2769,7 @@ void InstructionCodeGeneratorX86::VisitThrow(HThrow* instruction) {
   codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
 }
 
-void LocationsBuilderX86::VisitTypeCheck(HTypeCheck* instruction) {
+void LocationsBuilderX86::VisitInstanceOf(HInstanceOf* instruction) {
   LocationSummary::CallKind call_kind = instruction->IsClassFinal()
       ? LocationSummary::kNoCall
       : LocationSummary::kCallOnSlowPath;
@@ -2763,7 +2779,7 @@ void LocationsBuilderX86::VisitTypeCheck(HTypeCheck* instruction) {
   locations->SetOut(Location::RequiresRegister());
 }
 
-void InstructionCodeGeneratorX86::VisitTypeCheck(HTypeCheck* instruction) {
+void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
   LocationSummary* locations = instruction->GetLocations();
   Register obj = locations->InAt(0).As<Register>();
   Location cls = locations->InAt(1);
@@ -2794,7 +2810,7 @@ void InstructionCodeGeneratorX86::VisitTypeCheck(HTypeCheck* instruction) {
     // If the classes are not equal, we go into a slow path.
     DCHECK(locations->OnlyCallsOnSlowPath());
     slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86(
-        instruction, Location::RegisterLocation(out));
+        instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
     codegen_->AddSlowPath(slow_path);
     __ j(kNotEqual, slow_path->GetEntryLabel());
     __ movl(out, Immediate(1));
@@ -2808,5 +2824,40 @@ void InstructionCodeGeneratorX86::VisitTypeCheck(HTypeCheck* instruction) {
   __ Bind(&done);
 }
 
+void LocationsBuilderX86::VisitCheckCast(HCheckCast* instruction) {
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+      instruction, LocationSummary::kCallOnSlowPath);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::Any());
+  locations->AddTemp(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  Register obj = locations->InAt(0).As<Register>();
+  Location cls = locations->InAt(1);
+  Register temp = locations->GetTemp(0).As<Register>();
+  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+  SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86(
+      instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
+  codegen_->AddSlowPath(slow_path);
+
+  // TODO: avoid this check if we know obj is not null.
+  __ testl(obj, obj);
+  __ j(kEqual, slow_path->GetExitLabel());
+  __ movl(temp, Address(obj, class_offset));
+
+  // Compare the class of `obj` with `cls`.
+  if (cls.IsRegister()) {
+    __ cmpl(temp, cls.As<Register>());
+  } else {
+    DCHECK(cls.IsStackSlot()) << cls;
+    __ cmpl(temp, Address(ESP, cls.GetStackIndex()));
+  }
+
+  __ j(kNotEqual, slow_path->GetEntryLabel());
+  __ Bind(slow_path->GetExitLabel());
+}
+
 }  // namespace x86
 }  // namespace art
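One per-ISA detail: the x86 check-cast above (and the x86-64 one below) accepts the class operand in Location::Any() and can compare directly against a stack slot, because cmpl takes a memory operand; the ARM version requires both inputs in registers. A hedged sketch of that operand dispatch, with a hypothetical Location type rather than ART's:

  #include <cstdio>

  // Miniature stand-in for ART's Location: a register number or stack index.
  struct Location {
    enum Kind { kRegister, kStackSlot } kind;
    int value;
  };

  // Prints pseudo-assembly for the class compare; with a memory-capable
  // cmpl, a spilled class operand needs no extra register.
  void EmitCompareWithClass(int temp_reg, const Location& cls) {
    if (cls.kind == Location::kRegister) {
      std::printf("cmpl r%d, r%d\n", temp_reg, cls.value);
    } else {
      std::printf("cmpl r%d, [esp + %d]\n", temp_reg, cls.value);
    }
  }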
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index e09b6cab08..bd1e4f4f04 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -284,13 +284,19 @@ class LoadStringSlowPathX86_64 : public SlowPathCodeX86_64 {
 
 class TypeCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
  public:
-  TypeCheckSlowPathX86_64(HTypeCheck* instruction, Location object_class)
+  TypeCheckSlowPathX86_64(HInstruction* instruction,
+                          Location class_to_check,
+                          Location object_class,
+                          uint32_t dex_pc)
       : instruction_(instruction),
-        object_class_(object_class) {}
+        class_to_check_(class_to_check),
+        object_class_(object_class),
+        dex_pc_(dex_pc) {}
 
   virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
-    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+    DCHECK(instruction_->IsCheckCast()
+           || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
 
     CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
     __ Bind(GetEntryLabel());
@@ -299,7 +305,7 @@ class TypeCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
     InvokeRuntimeCallingConvention calling_convention;
-    MoveOperands move1(locations->InAt(1),
+    MoveOperands move1(class_to_check_,
                        Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                        nullptr);
     MoveOperands move2(object_class_,
@@ -310,18 +316,29 @@ class TypeCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
     parallel_move.AddMove(&move2);
     x64_codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
 
-    __ gs()->call(
-        Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInstanceofNonTrivial), true));
-    codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
-    x64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX));
+    if (instruction_->IsInstanceOf()) {
+      __ gs()->call(
+          Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInstanceofNonTrivial), true));
+    } else {
+      DCHECK(instruction_->IsCheckCast());
+      __ gs()->call(
+          Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pCheckCast), true));
+    }
+    codegen->RecordPcInfo(instruction_, dex_pc_);
+
+    if (instruction_->IsInstanceOf()) {
+      x64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX));
+    }
 
     codegen->RestoreLiveRegisters(locations);
     __ jmp(GetExitLabel());
   }
 
  private:
-  HTypeCheck* const instruction_;
+  HInstruction* const instruction_;
+  const Location class_to_check_;
   const Location object_class_;
+  const uint32_t dex_pc_;
 
   DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathX86_64);
 };
@@ -2743,7 +2760,7 @@ void InstructionCodeGeneratorX86_64::VisitThrow(HThrow* instruction) {
   codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
 }
 
-void LocationsBuilderX86_64::VisitTypeCheck(HTypeCheck* instruction) {
+void LocationsBuilderX86_64::VisitInstanceOf(HInstanceOf* instruction) {
   LocationSummary::CallKind call_kind = instruction->IsClassFinal()
       ? LocationSummary::kNoCall
       : LocationSummary::kCallOnSlowPath;
@@ -2753,7 +2770,7 @@ void LocationsBuilderX86_64::VisitTypeCheck(HTypeCheck* instruction) {
   locations->SetOut(Location::RequiresRegister());
 }
 
-void InstructionCodeGeneratorX86_64::VisitTypeCheck(HTypeCheck* instruction) {
+void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
   LocationSummary* locations = instruction->GetLocations();
   CpuRegister obj = locations->InAt(0).As<CpuRegister>();
   Location cls = locations->InAt(1);
@@ -2783,7 +2800,7 @@ void InstructionCodeGeneratorX86_64::VisitTypeCheck(HTypeCheck* instruction) {
     // If the classes are not equal, we go into a slow path.
     DCHECK(locations->OnlyCallsOnSlowPath());
     slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(
-        instruction, Location::RegisterLocation(out.AsRegister()));
+        instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
     codegen_->AddSlowPath(slow_path);
     __ j(kNotEqual, slow_path->GetEntryLabel());
     __ movl(out, Immediate(1));
@@ -2797,5 +2814,39 @@ void InstructionCodeGeneratorX86_64::VisitTypeCheck(HTypeCheck* instruction) {
   __ Bind(&done);
 }
 
+void LocationsBuilderX86_64::VisitCheckCast(HCheckCast* instruction) {
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+      instruction, LocationSummary::kCallOnSlowPath);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::Any());
+  locations->AddTemp(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  CpuRegister obj = locations->InAt(0).As<CpuRegister>();
+  Location cls = locations->InAt(1);
+  CpuRegister temp = locations->GetTemp(0).As<CpuRegister>();
+  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+  SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(
+      instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
+  codegen_->AddSlowPath(slow_path);
+
+  // TODO: avoid this check if we know obj is not null.
+  __ testl(obj, obj);
+  __ j(kEqual, slow_path->GetExitLabel());
+  // Compare the class of `obj` with `cls`.
+  __ movl(temp, Address(obj, class_offset));
+  if (cls.IsRegister()) {
+    __ cmpl(temp, cls.As<CpuRegister>());
+  } else {
+    DCHECK(cls.IsStackSlot()) << cls;
+    __ cmpl(temp, Address(CpuRegister(RSP), cls.GetStackIndex()));
+  }
+  // Classes must be equal for the checkcast to succeed.
+  __ j(kNotEqual, slow_path->GetEntryLabel());
+  __ Bind(slow_path->GetExitLabel());
+}
+
 }  // namespace x86_64
 }  // namespace art
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 36e286d578..0c75e41fc4 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -479,6 +479,7 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> {
   M(ArrayLength, Instruction)                                           \
   M(ArraySet, Instruction)                                              \
   M(BoundsCheck, Instruction)                                           \
+  M(CheckCast, Instruction)                                             \
   M(ClinitCheck, Instruction)                                           \
   M(Compare, BinaryOperation)                                           \
   M(Condition, BinaryOperation)                                         \
@@ -494,6 +495,7 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> {
   M(If, Instruction)                                                    \
   M(InstanceFieldGet, Instruction)                                      \
   M(InstanceFieldSet, Instruction)                                      \
+  M(InstanceOf, Instruction)                                            \
   M(IntConstant, Constant)                                              \
   M(InvokeInterface, Invoke)                                            \
   M(InvokeStatic, Invoke)                                               \
@@ -525,7 +527,6 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> {
   M(SuspendCheck, Instruction)                                          \
   M(Temporary, Instruction)                                             \
   M(Throw, Instruction)                                                 \
-  M(TypeCheck, Instruction)                                             \
   M(TypeConversion, Instruction)                                        \
 
 #define FOR_EACH_INSTRUCTION(M)                                         \
@@ -2355,12 +2356,12 @@ class HThrow : public HTemplateInstruction<1> {
   DISALLOW_COPY_AND_ASSIGN(HThrow);
 };
 
-class HTypeCheck : public HExpression<2> {
+class HInstanceOf : public HExpression<2> {
  public:
-  explicit HTypeCheck(HInstruction* object,
-                      HLoadClass* constant,
-                      bool class_is_final,
-                      uint32_t dex_pc)
+  HInstanceOf(HInstruction* object,
+              HLoadClass* constant,
+              bool class_is_final,
+              uint32_t dex_pc)
       : HExpression(Primitive::kPrimBoolean, SideEffects::None()),
         class_is_final_(class_is_final),
         dex_pc_(dex_pc) {
@@ -2370,13 +2371,11 @@ class HTypeCheck : public HExpression<2> {
 
   bool CanBeMoved() const OVERRIDE { return true; }
 
-  bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
-    UNUSED(other);
+  bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
     return true;
   }
 
   bool NeedsEnvironment() const OVERRIDE {
-    // TODO: Can we debug when doing a runtime instanceof check?
     return false;
   }
 
@@ -2384,13 +2383,52 @@ class HTypeCheck : public HExpression<2> {
 
   bool IsClassFinal() const { return class_is_final_; }
 
-  DECLARE_INSTRUCTION(TypeCheck);
+  DECLARE_INSTRUCTION(InstanceOf);
+
+ private:
+  const bool class_is_final_;
+  const uint32_t dex_pc_;
+
+  DISALLOW_COPY_AND_ASSIGN(HInstanceOf);
+};
+
+class HCheckCast : public HTemplateInstruction<2> {
+ public:
+  HCheckCast(HInstruction* object,
+             HLoadClass* constant,
+             bool class_is_final,
+             uint32_t dex_pc)
+      : HTemplateInstruction(SideEffects::None()),
+        class_is_final_(class_is_final),
+        dex_pc_(dex_pc) {
+    SetRawInputAt(0, object);
+    SetRawInputAt(1, constant);
+  }
+
+  bool CanBeMoved() const OVERRIDE { return true; }
+
+  bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+    return true;
+  }
+
+  bool NeedsEnvironment() const OVERRIDE {
+    // Instruction may throw a CheckCastError.
+    return true;
+  }
+
+  bool CanThrow() const OVERRIDE { return true; }
+
+  uint32_t GetDexPc() const { return dex_pc_; }
+
+  bool IsClassFinal() const { return class_is_final_; }
+
+  DECLARE_INSTRUCTION(CheckCast);
 
  private:
   const bool class_is_final_;
   const uint32_t dex_pc_;
 
-  DISALLOW_COPY_AND_ASSIGN(HTypeCheck);
+  DISALLOW_COPY_AND_ASSIGN(HCheckCast);
 };
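The nodes.h split encodes two different contracts for the optimizer: HInstanceOf remains a movable boolean expression with no environment, while HCheckCast can throw and therefore must carry an environment so the runtime can reconstruct interpreter state (the thrown exception is ClassCastException at the Java level). A compact summary of the overrides, using a hypothetical traits struct:

  // Mirrors the virtual overrides the two node classes declare above.
  struct NodeTraits {
    bool can_be_moved;
    bool can_throw;
    bool needs_environment;  // needed to rebuild interpreter state on a throw
  };

  constexpr NodeTraits kInstanceOfTraits{/*can_be_moved=*/true,
                                         /*can_throw=*/false,
                                         /*needs_environment=*/false};
  constexpr NodeTraits kCheckCastTraits{/*can_be_moved=*/true,
                                        /*can_throw=*/true,
                                        /*needs_environment=*/true};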