From 5a6cc49ed4f36dd11d6ec1590857b884ad8da6ab Mon Sep 17 00:00:00 2001
From: Serban Constantinescu
Date: Thu, 13 Aug 2015 15:20:25 +0100
Subject: SlowPath: Remove the use of Locations in the SlowPath constructors.

The main motivation is that using locations in the SlowPath constructors
ties us to creating the SlowPaths after register allocation, since before
register allocation the locations are invalid. A later patch of the series
will move the SlowPath creation to the LocationsBuilder visitors. This will
enable us to add more checking, as well as to consider sharing multiple
SlowPaths of the same type.

Change-Id: I7e96dcc2b5586d15153c942373e9281ecfe013f0
Signed-off-by: Serban Constantinescu
---
 compiler/optimizing/code_generator_arm.cc    | 58 +++++++++++++---------------
 compiler/optimizing/code_generator_arm64.cc  | 55 ++++++++++----------------
 compiler/optimizing/code_generator_mips64.cc | 57 +++++++++------------------
 compiler/optimizing/code_generator_x86.cc    | 40 +++++++------------
 compiler/optimizing/code_generator_x86_64.cc | 50 ++++++++++--------------
 5 files changed, 101 insertions(+), 159 deletions(-)
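Note on the pattern (not part of the original commit message): every hunk in
this patch makes the same mechanical change. The SlowPath constructors stop
caching Location values and instead query the instruction's LocationSummary
inside EmitNativeCode(). The standalone C++ sketch below illustrates why that
lifts the ordering constraint; it uses stub types and hypothetical names
(SlowPathBefore, SlowPathAfter) and is not code from this patch or from ART.

// Standalone sketch with stub types; hypothetical names, not ART code.
#include <cstdio>

struct Location { int reg; };
struct LocationSummary { Location in0; Location in1; };

struct HInstruction {
  LocationSummary* GetLocations() const { return locations; }
  LocationSummary* locations = nullptr;  // Stays null until "register allocation" runs.
};

// Old style: the constructor snapshots Location values, so the slow path
// cannot be created until register allocation has produced valid locations.
class SlowPathBefore {
 public:
  SlowPathBefore(HInstruction* instruction, Location index, Location length)
      : instruction_(instruction), index_(index), length_(length) {}
  void EmitNativeCode() {
    std::printf("index in r%d, length in r%d\n", index_.reg, length_.reg);
  }
 private:
  HInstruction* const instruction_;  // Kept only to mirror the classes in the patch.
  const Location index_;
  const Location length_;
};

// New style: the constructor keeps only the instruction; locations are read
// in EmitNativeCode(), which always runs after register allocation, so the
// slow path may be constructed earlier (e.g. by the LocationsBuilder
// visitors mentioned in the commit message).
class SlowPathAfter {
 public:
  explicit SlowPathAfter(HInstruction* instruction) : instruction_(instruction) {}
  void EmitNativeCode() {
    LocationSummary* locations = instruction_->GetLocations();  // Valid by emit time.
    std::printf("index in r%d, length in r%d\n", locations->in0.reg, locations->in1.reg);
  }
 private:
  HInstruction* const instruction_;
};

int main() {
  HInstruction check;
  SlowPathAfter slow_path(&check);  // Legal even though locations are still null.
  LocationSummary summary{{0}, {1}};
  check.locations = &summary;       // "Register allocation" fills the locations in.
  slow_path.EmitNativeCode();       // Reads valid locations at emission time.
  SlowPathBefore old_style(&check, summary.in0, summary.in1);  // Had to wait until here.
  old_style.EmitNativeCode();
  return 0;
}

The key property the patch relies on: GetLocations() is only dereferenced
inside EmitNativeCode(), which the code generator calls after register
allocation, so constructing the slow path earlier becomes safe.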
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 62026f31ab..d90bdd47e8 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -142,24 +142,22 @@ class SuspendCheckSlowPathARM : public SlowPathCodeARM {
 
 class BoundsCheckSlowPathARM : public SlowPathCodeARM {
  public:
-  BoundsCheckSlowPathARM(HBoundsCheck* instruction,
-                         Location index_location,
-                         Location length_location)
-      : instruction_(instruction),
-        index_location_(index_location),
-        length_location_(length_location) {}
+  explicit BoundsCheckSlowPathARM(HBoundsCheck* instruction)
+      : instruction_(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
+    LocationSummary* locations = instruction_->GetLocations();
+
     __ Bind(GetEntryLabel());
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
     InvokeRuntimeCallingConvention calling_convention;
     codegen->EmitParallelMoves(
-        index_location_,
+        locations->InAt(0),
         Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
         Primitive::kPrimInt,
-        length_location_,
+        locations->InAt(1),
         Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
         Primitive::kPrimInt);
     arm_codegen->InvokeRuntime(
@@ -172,8 +170,6 @@ class BoundsCheckSlowPathARM : public SlowPathCodeARM {
 
  private:
   HBoundsCheck* const instruction_;
-  const Location index_location_;
-  const Location length_location_;
 
   DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM);
 };
@@ -263,17 +259,12 @@ class LoadStringSlowPathARM : public SlowPathCodeARM {
 
 class TypeCheckSlowPathARM : public SlowPathCodeARM {
  public:
-  TypeCheckSlowPathARM(HInstruction* instruction,
-                       Location class_to_check,
-                       Location object_class,
-                       uint32_t dex_pc)
-      : instruction_(instruction),
-        class_to_check_(class_to_check),
-        object_class_(object_class),
-        dex_pc_(dex_pc) {}
+  explicit TypeCheckSlowPathARM(HInstruction* instruction) : instruction_(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
+    Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
+                                                        : locations->Out();
     DCHECK(instruction_->IsCheckCast()
            || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
 
@@ -285,20 +276,25 @@ class TypeCheckSlowPathARM : public SlowPathCodeARM {
     // move resolver.
     InvokeRuntimeCallingConvention calling_convention;
     codegen->EmitParallelMoves(
-        class_to_check_,
+        locations->InAt(1),
         Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
         Primitive::kPrimNot,
-        object_class_,
+        object_class,
         Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
         Primitive::kPrimNot);
 
     if (instruction_->IsInstanceOf()) {
-      arm_codegen->InvokeRuntime(
-          QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc_, this);
+      arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
+                                 instruction_,
+                                 instruction_->GetDexPc(),
+                                 this);
       arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
     } else {
       DCHECK(instruction_->IsCheckCast());
-      arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_, this);
+      arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
+                                 instruction_,
+                                 instruction_->GetDexPc(),
+                                 this);
     }
 
     RestoreLiveRegisters(codegen, locations);
@@ -309,9 +305,6 @@ class TypeCheckSlowPathARM : public SlowPathCodeARM {
 
  private:
   HInstruction* const instruction_;
-  const Location class_to_check_;
-  const Location object_class_;
-  uint32_t dex_pc_;
 
   DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM);
 };
@@ -3901,8 +3894,8 @@ void LocationsBuilderARM::VisitBoundsCheck(HBoundsCheck* instruction) {
 
 void InstructionCodeGeneratorARM::VisitBoundsCheck(HBoundsCheck* instruction) {
   LocationSummary* locations = instruction->GetLocations();
-  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathARM(
-      instruction, locations->InAt(0), locations->InAt(1));
+  SlowPathCodeARM* slow_path =
+      new (GetGraph()->GetArena()) BoundsCheckSlowPathARM(instruction);
   codegen_->AddSlowPath(slow_path);
 
   Register index = locations->InAt(0).AsRegister<Register>();
@@ -4346,6 +4339,7 @@ void LocationsBuilderARM::VisitInstanceOf(HInstanceOf* instruction) {
   locations->SetInAt(0, Location::RequiresRegister());
   locations->SetInAt(1, Location::RequiresRegister());
   // The out register is used as a temporary, so it overlaps with the inputs.
+  // Note that TypeCheckSlowPathARM uses this register too.
   locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
 }
 
@@ -4375,8 +4369,7 @@ void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
   } else {
     // If the classes are not equal, we go into a slow path.
     DCHECK(locations->OnlyCallsOnSlowPath());
-    slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
-        instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
+    slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(instruction);
     codegen_->AddSlowPath(slow_path);
     __ b(slow_path->GetEntryLabel(), NE);
     __ LoadImmediate(out, 1);
@@ -4399,6 +4392,7 @@ void LocationsBuilderARM::VisitCheckCast(HCheckCast* instruction) {
       instruction, LocationSummary::kCallOnSlowPath);
   locations->SetInAt(0, Location::RequiresRegister());
   locations->SetInAt(1, Location::RequiresRegister());
+  // Note that TypeCheckSlowPathARM uses this register too.
   locations->AddTemp(Location::RequiresRegister());
 }
 
@@ -4409,8 +4403,8 @@ void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) {
   Register temp = locations->GetTemp(0).AsRegister<Register>();
   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
-  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
-      instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
+  SlowPathCodeARM* slow_path =
+      new (GetGraph()->GetArena()) TypeCheckSlowPathARM(instruction);
   codegen_->AddSlowPath(slow_path);
 
   // avoid null check if we know obj is not null.
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 25b3ea2f5f..8035461239 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -191,23 +187,19 @@ void SlowPathCodeARM64::RestoreLiveRegisters(CodeGenerator* codegen, LocationSum
 
 class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 {
  public:
-  BoundsCheckSlowPathARM64(HBoundsCheck* instruction,
-                           Location index_location,
-                           Location length_location)
-      : instruction_(instruction),
-        index_location_(index_location),
-        length_location_(length_location) {}
-
+  explicit BoundsCheckSlowPathARM64(HBoundsCheck* instruction) : instruction_(instruction) {}
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    LocationSummary* locations = instruction_->GetLocations();
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
+
     __ Bind(GetEntryLabel());
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
     InvokeRuntimeCallingConvention calling_convention;
     codegen->EmitParallelMoves(
-        index_location_, LocationFrom(calling_convention.GetRegisterAt(0)), Primitive::kPrimInt,
-        length_location_, LocationFrom(calling_convention.GetRegisterAt(1)), Primitive::kPrimInt);
+        locations->InAt(0), LocationFrom(calling_convention.GetRegisterAt(0)), Primitive::kPrimInt,
+        locations->InAt(1), LocationFrom(calling_convention.GetRegisterAt(1)), Primitive::kPrimInt);
     arm64_codegen->InvokeRuntime(
         QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc(), this);
     CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
@@ -219,8 +215,6 @@ class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 {
 
  private:
   HBoundsCheck* const instruction_;
-  const Location index_location_;
-  const Location length_location_;
 
   DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM64);
 };
@@ -403,20 +397,17 @@ class SuspendCheckSlowPathARM64 : public SlowPathCodeARM64 {
 
 class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
  public:
-  TypeCheckSlowPathARM64(HInstruction* instruction,
-                         Location class_to_check,
-                         Location object_class,
-                         uint32_t dex_pc)
-      : instruction_(instruction),
-        class_to_check_(class_to_check),
-        object_class_(object_class),
-        dex_pc_(dex_pc) {}
+  explicit TypeCheckSlowPathARM64(HInstruction* instruction) : instruction_(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
+    Location class_to_check = locations->InAt(1);
+    Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
+                                                        : locations->Out();
     DCHECK(instruction_->IsCheckCast()
            || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
+    uint32_t dex_pc = instruction_->GetDexPc();
 
     __ Bind(GetEntryLabel());
     SaveLiveRegisters(codegen, locations);
@@ -425,12 +416,12 @@ class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
     // move resolver.
     InvokeRuntimeCallingConvention calling_convention;
     codegen->EmitParallelMoves(
-        class_to_check_, LocationFrom(calling_convention.GetRegisterAt(0)), Primitive::kPrimNot,
-        object_class_, LocationFrom(calling_convention.GetRegisterAt(1)), Primitive::kPrimNot);
+        class_to_check, LocationFrom(calling_convention.GetRegisterAt(0)), Primitive::kPrimNot,
+        object_class, LocationFrom(calling_convention.GetRegisterAt(1)), Primitive::kPrimNot);
 
     if (instruction_->IsInstanceOf()) {
       arm64_codegen->InvokeRuntime(
-          QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc_, this);
+          QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc, this);
       Primitive::Type ret_type = instruction_->GetType();
       Location ret_loc = calling_convention.GetReturnLocation(ret_type);
       arm64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
@@ -438,7 +429,7 @@ class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
                            const mirror::Class*, const mirror::Class*>();
     } else {
       DCHECK(instruction_->IsCheckCast());
-      arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_, this);
+      arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc, this);
       CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
     }
 
@@ -450,9 +441,6 @@ class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
 
  private:
   HInstruction* const instruction_;
-  const Location class_to_check_;
-  const Location object_class_;
-  uint32_t dex_pc_;
 
   DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM64);
 };
@@ -1602,9 +1590,8 @@ void LocationsBuilderARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
 }
 
 void InstructionCodeGeneratorARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  BoundsCheckSlowPathARM64* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathARM64(
-      instruction, locations->InAt(0), locations->InAt(1));
+  BoundsCheckSlowPathARM64* slow_path =
+      new (GetGraph()->GetArena()) BoundsCheckSlowPathARM64(instruction);
   codegen_->AddSlowPath(slow_path);
 
   __ Cmp(InputRegisterAt(instruction, 0), InputOperandAt(instruction, 1));
@@ -1616,17 +1603,17 @@ void LocationsBuilderARM64::VisitCheckCast(HCheckCast* instruction) {
       instruction, LocationSummary::kCallOnSlowPath);
   locations->SetInAt(0, Location::RequiresRegister());
   locations->SetInAt(1, Location::RequiresRegister());
+  // Note that TypeCheckSlowPathARM64 uses this register too.
   locations->AddTemp(Location::RequiresRegister());
 }
 
 void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
   Register obj = InputRegisterAt(instruction, 0);;
   Register cls = InputRegisterAt(instruction, 1);;
   Register obj_cls = WRegisterFrom(instruction->GetLocations()->GetTemp(0));
 
-  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(
-      instruction, locations->InAt(1), LocationFrom(obj_cls), instruction->GetDexPc());
+  SlowPathCodeARM64* slow_path =
+      new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(instruction);
   codegen_->AddSlowPath(slow_path);
 
   // Avoid null check if we know obj is not null.
@@ -2240,6 +2227,7 @@ void LocationsBuilderARM64::VisitInstanceOf(HInstanceOf* instruction) {
   locations->SetInAt(0, Location::RequiresRegister());
   locations->SetInAt(1, Location::RequiresRegister());
   // The output does overlap inputs.
+  // Note that TypeCheckSlowPathARM64 uses this register too.
   locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
 }
 
@@ -2269,8 +2257,7 @@ void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
     // If the classes are not equal, we go into a slow path.
     DCHECK(locations->OnlyCallsOnSlowPath());
     SlowPathCodeARM64* slow_path =
-        new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(
-            instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
+        new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(instruction);
     codegen_->AddSlowPath(slow_path);
     __ B(ne, slow_path->GetEntryLabel());
     __ Mov(out, 1);
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 093d786dfe..e4188e4e9d 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -112,23 +112,19 @@ Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type type)
 
 class BoundsCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
  public:
-  BoundsCheckSlowPathMIPS64(HBoundsCheck* instruction,
-                            Location index_location,
-                            Location length_location)
-      : instruction_(instruction),
-        index_location_(index_location),
-        length_location_(length_location) {}
+  explicit BoundsCheckSlowPathMIPS64(HBoundsCheck* instruction) : instruction_(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    LocationSummary* locations = instruction_->GetLocations();
     CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
     __ Bind(GetEntryLabel());
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
     InvokeRuntimeCallingConvention calling_convention;
-    codegen->EmitParallelMoves(index_location_,
+    codegen->EmitParallelMoves(locations->InAt(0),
                                Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                                Primitive::kPrimInt,
-                               length_location_,
+                               locations->InAt(1),
                                Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
                                Primitive::kPrimInt);
     mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowArrayBounds),
@@ -144,8 +140,6 @@ class BoundsCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 
  private:
   HBoundsCheck* const instruction_;
-  const Location index_location_;
-  const Location length_location_;
 
   DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathMIPS64);
 };
@@ -334,17 +328,13 @@ class SuspendCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 
 class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
  public:
-  TypeCheckSlowPathMIPS64(HInstruction* instruction,
-                          Location class_to_check,
-                          Location object_class,
-                          uint32_t dex_pc)
-      : instruction_(instruction),
-        class_to_check_(class_to_check),
-        object_class_(object_class),
-        dex_pc_(dex_pc) {}
+  explicit TypeCheckSlowPathMIPS64(HInstruction* instruction) : instruction_(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
+    Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
+                                                        : locations->Out();
+    uint32_t dex_pc = instruction_->GetDexPc();
     DCHECK(instruction_->IsCheckCast()
            || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
     CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
@@ -355,17 +345,17 @@ class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
     InvokeRuntimeCallingConvention calling_convention;
-    codegen->EmitParallelMoves(class_to_check_,
+    codegen->EmitParallelMoves(locations->InAt(1),
                                Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                                Primitive::kPrimNot,
-                               object_class_,
+                               object_class,
                                Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
                                Primitive::kPrimNot);
 
     if (instruction_->IsInstanceOf()) {
       mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
                                     instruction_,
-                                    dex_pc_,
+                                    dex_pc,
                                     this);
       Primitive::Type ret_type = instruction_->GetType();
       Location ret_loc = calling_convention.GetReturnLocation(ret_type);
@@ -376,7 +366,7 @@ class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
                                      const mirror::Class*>();
     } else {
       DCHECK(instruction_->IsCheckCast());
-      mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_, this);
+      mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc, this);
       CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
     }
 
@@ -388,9 +378,6 @@ class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 
  private:
   HInstruction* const instruction_;
-  const Location class_to_check_;
-  const Location object_class_;
-  uint32_t dex_pc_;
 
   DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathMIPS64);
 };
@@ -1590,10 +1577,8 @@ void LocationsBuilderMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
 
 void InstructionCodeGeneratorMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
   LocationSummary* locations = instruction->GetLocations();
-  BoundsCheckSlowPathMIPS64* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathMIPS64(
-      instruction,
-      locations->InAt(0),
-      locations->InAt(1));
+  BoundsCheckSlowPathMIPS64* slow_path =
+      new (GetGraph()->GetArena()) BoundsCheckSlowPathMIPS64(instruction);
   codegen_->AddSlowPath(slow_path);
 
   GpuRegister index = locations->InAt(0).AsRegister<GpuRegister>();
@@ -1616,6 +1601,7 @@ void LocationsBuilderMIPS64::VisitCheckCast(HCheckCast* instruction) {
                                                         LocationSummary::kCallOnSlowPath);
   locations->SetInAt(0, Location::RequiresRegister());
   locations->SetInAt(1, Location::RequiresRegister());
+  // Note that TypeCheckSlowPathMIPS64 uses this register too.
   locations->AddTemp(Location::RequiresRegister());
 }
 
@@ -1625,11 +1611,8 @@ void InstructionCodeGeneratorMIPS64::VisitCheckCast(HCheckCast* instruction) {
   GpuRegister cls = locations->InAt(1).AsRegister<GpuRegister>();
   GpuRegister obj_cls = locations->GetTemp(0).AsRegister<GpuRegister>();
 
-  SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(
-      instruction,
-      locations->InAt(1),
-      Location::RegisterLocation(obj_cls),
-      instruction->GetDexPc());
+  SlowPathCodeMIPS64* slow_path =
+      new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction);
   codegen_->AddSlowPath(slow_path);
 
   // TODO: avoid this check if we know obj is not null.
@@ -2270,6 +2253,7 @@ void LocationsBuilderMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
   locations->SetInAt(0, Location::RequiresRegister());
   locations->SetInAt(1, Location::RequiresRegister());
   // The output does overlap inputs.
+  // Note that TypeCheckSlowPathMIPS64 uses this register too.
   locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
 }
 
@@ -2296,10 +2280,7 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
     // If the classes are not equal, we go into a slow path.
     DCHECK(locations->OnlyCallsOnSlowPath());
     SlowPathCodeMIPS64* slow_path =
-        new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction,
-                                                             locations->InAt(1),
-                                                             locations->Out(),
-                                                             instruction->GetDexPc());
+        new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction);
     codegen_->AddSlowPath(slow_path);
     __ Bnec(out, cls, slow_path->GetEntryLabel());
     __ LoadConst32(out, 1);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 72c690de9a..e8aa61d4ef 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -116,24 +116,20 @@ class DivRemMinusOneSlowPathX86 : public SlowPathCodeX86 {
 
 class BoundsCheckSlowPathX86 : public SlowPathCodeX86 {
  public:
-  BoundsCheckSlowPathX86(HBoundsCheck* instruction,
-                         Location index_location,
-                         Location length_location)
-      : instruction_(instruction),
-        index_location_(index_location),
-        length_location_(length_location) {}
+  explicit BoundsCheckSlowPathX86(HBoundsCheck* instruction) : instruction_(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    LocationSummary* locations = instruction_->GetLocations();
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
     __ Bind(GetEntryLabel());
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
     InvokeRuntimeCallingConvention calling_convention;
     x86_codegen->EmitParallelMoves(
-        index_location_,
+        locations->InAt(0),
         Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
         Primitive::kPrimInt,
-        length_location_,
+        locations->InAt(1),
         Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
         Primitive::kPrimInt);
     x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowArrayBounds),
@@ -148,8 +144,6 @@ class BoundsCheckSlowPathX86 : public SlowPathCodeX86 {
 
  private:
   HBoundsCheck* const instruction_;
-  const Location index_location_;
-  const Location length_location_;
 
   DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathX86);
 };
@@ -280,15 +274,12 @@ class LoadClassSlowPathX86 : public SlowPathCodeX86 {
 
 class TypeCheckSlowPathX86 : public SlowPathCodeX86 {
  public:
-  TypeCheckSlowPathX86(HInstruction* instruction,
-                       Location class_to_check,
-                       Location object_class)
-      : instruction_(instruction),
-        class_to_check_(class_to_check),
-        object_class_(object_class) {}
+  explicit TypeCheckSlowPathX86(HInstruction* instruction) : instruction_(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
+    Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
+                                                        : locations->Out();
     DCHECK(instruction_->IsCheckCast()
            || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
 
@@ -300,10 +291,10 @@ class TypeCheckSlowPathX86 : public SlowPathCodeX86 {
     // move resolver.
     InvokeRuntimeCallingConvention calling_convention;
     x86_codegen->EmitParallelMoves(
-        class_to_check_,
+        locations->InAt(1),
         Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
         Primitive::kPrimNot,
-        object_class_,
+        object_class,
         Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
         Primitive::kPrimNot);
 
@@ -332,8 +323,6 @@ class TypeCheckSlowPathX86 : public SlowPathCodeX86 {
 
  private:
   HInstruction* const instruction_;
-  const Location class_to_check_;
-  const Location object_class_;
 
   DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathX86);
 };
@@ -4357,7 +4346,7 @@ void InstructionCodeGeneratorX86::VisitBoundsCheck(HBoundsCheck* instruction) {
   Location index_loc = locations->InAt(0);
   Location length_loc = locations->InAt(1);
   SlowPathCodeX86* slow_path =
-      new (GetGraph()->GetArena()) BoundsCheckSlowPathX86(instruction, index_loc, length_loc);
+      new (GetGraph()->GetArena()) BoundsCheckSlowPathX86(instruction);
 
   if (length_loc.IsConstant()) {
     int32_t length = CodeGenerator::GetInt32ValueOf(length_loc.GetConstant());
@@ -4830,6 +4819,7 @@ void LocationsBuilderX86::VisitInstanceOf(HInstanceOf* instruction) {
   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
   locations->SetInAt(0, Location::RequiresRegister());
   locations->SetInAt(1, Location::Any());
+  // Note that TypeCheckSlowPathX86 uses this register too.
   locations->SetOut(Location::RequiresRegister());
 }
 
@@ -4866,8 +4856,7 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
   } else {
     // If the classes are not equal, we go into a slow path.
     DCHECK(locations->OnlyCallsOnSlowPath());
-    slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86(
-        instruction, locations->InAt(1), locations->Out());
+    slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86(instruction);
     codegen_->AddSlowPath(slow_path);
     __ j(kNotEqual, slow_path->GetEntryLabel());
     __ movl(out, Immediate(1));
@@ -4890,6 +4879,7 @@ void LocationsBuilderX86::VisitCheckCast(HCheckCast* instruction) {
       instruction, LocationSummary::kCallOnSlowPath);
   locations->SetInAt(0, Location::RequiresRegister());
   locations->SetInAt(1, Location::Any());
+  // Note that TypeCheckSlowPathX86 uses this register too.
   locations->AddTemp(Location::RequiresRegister());
 }
 
@@ -4899,8 +4889,8 @@ void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) {
   Location cls = locations->InAt(1);
   Register temp = locations->GetTemp(0).AsRegister<Register>();
   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
-  SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86(
-      instruction, locations->InAt(1), locations->GetTemp(0));
+  SlowPathCodeX86* slow_path =
+      new (GetGraph()->GetArena()) TypeCheckSlowPathX86(instruction);
   codegen_->AddSlowPath(slow_path);
 
   // Avoid null check if we know obj is not null.
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 820ec781bb..ff52f4f925 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -170,24 +170,21 @@ class SuspendCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
 
 class BoundsCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
  public:
-  BoundsCheckSlowPathX86_64(HBoundsCheck* instruction,
-                            Location index_location,
-                            Location length_location)
-      : instruction_(instruction),
-        index_location_(index_location),
-        length_location_(length_location) {}
+  explicit BoundsCheckSlowPathX86_64(HBoundsCheck* instruction)
+      : instruction_(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    LocationSummary* locations = instruction_->GetLocations();
     CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
     __ Bind(GetEntryLabel());
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
     InvokeRuntimeCallingConvention calling_convention;
     codegen->EmitParallelMoves(
-        index_location_,
+        locations->InAt(0),
         Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
         Primitive::kPrimInt,
-        length_location_,
+        locations->InAt(1),
         Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
         Primitive::kPrimInt);
     x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowArrayBounds),
@@ -200,8 +197,6 @@ class BoundsCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
 
  private:
   HBoundsCheck* const instruction_;
-  const Location index_location_;
-  const Location length_location_;
 
   DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathX86_64);
 };
@@ -293,17 +288,14 @@ class LoadStringSlowPathX86_64 : public SlowPathCodeX86_64 {
 
 class TypeCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
  public:
-  TypeCheckSlowPathX86_64(HInstruction* instruction,
-                          Location class_to_check,
-                          Location object_class,
-                          uint32_t dex_pc)
-      : instruction_(instruction),
-        class_to_check_(class_to_check),
-        object_class_(object_class),
-        dex_pc_(dex_pc) {}
+  explicit TypeCheckSlowPathX86_64(HInstruction* instruction)
+      : instruction_(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
+    Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
+                                                        : locations->Out();
+    uint32_t dex_pc = instruction_->GetDexPc();
     DCHECK(instruction_->IsCheckCast()
            || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
 
@@ -315,23 +307,23 @@ class TypeCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
     // move resolver.
     InvokeRuntimeCallingConvention calling_convention;
     codegen->EmitParallelMoves(
-        class_to_check_,
+        locations->InAt(1),
         Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
         Primitive::kPrimNot,
-        object_class_,
+        object_class,
         Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
         Primitive::kPrimNot);
 
     if (instruction_->IsInstanceOf()) {
       x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
                                  instruction_,
-                                 dex_pc_,
+                                 dex_pc,
                                  this);
     } else {
       DCHECK(instruction_->IsCheckCast());
       x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
                                  instruction_,
-                                 dex_pc_,
+                                 dex_pc,
                                  this);
     }
 
@@ -347,9 +339,6 @@ class TypeCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
 
  private:
   HInstruction* const instruction_;
-  const Location class_to_check_;
-  const Location object_class_;
-  const uint32_t dex_pc_;
 
   DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathX86_64);
 };
@@ -4195,7 +4184,7 @@ void InstructionCodeGeneratorX86_64::VisitBoundsCheck(HBoundsCheck* instruction)
   Location index_loc = locations->InAt(0);
   Location length_loc = locations->InAt(1);
   SlowPathCodeX86_64* slow_path =
-      new (GetGraph()->GetArena()) BoundsCheckSlowPathX86_64(instruction, index_loc, length_loc);
+      new (GetGraph()->GetArena()) BoundsCheckSlowPathX86_64(instruction);
 
   if (length_loc.IsConstant()) {
     int32_t length = CodeGenerator::GetInt32ValueOf(length_loc.GetConstant());
@@ -4653,6 +4642,7 @@ void LocationsBuilderX86_64::VisitInstanceOf(HInstanceOf* instruction) {
   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
   locations->SetInAt(0, Location::RequiresRegister());
   locations->SetInAt(1, Location::Any());
+  // Note that TypeCheckSlowPathX86_64 uses this register too.
   locations->SetOut(Location::RequiresRegister());
 }
 
@@ -4688,8 +4678,7 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
   } else {
     // If the classes are not equal, we go into a slow path.
    DCHECK(locations->OnlyCallsOnSlowPath());
-    slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(
-        instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
+    slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(instruction);
     codegen_->AddSlowPath(slow_path);
     __ j(kNotEqual, slow_path->GetEntryLabel());
     __ movl(out, Immediate(1));
@@ -4712,6 +4701,7 @@ void LocationsBuilderX86_64::VisitCheckCast(HCheckCast* instruction) {
       instruction, LocationSummary::kCallOnSlowPath);
   locations->SetInAt(0, Location::RequiresRegister());
   locations->SetInAt(1, Location::Any());
+  // Note that TypeCheckSlowPathX86_64 uses this register too.
   locations->AddTemp(Location::RequiresRegister());
 }
 
@@ -4721,8 +4711,8 @@ void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) {
   Location cls = locations->InAt(1);
   CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>();
   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
-  SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(
-      instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
+  SlowPathCodeX86_64* slow_path =
+      new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(instruction);
   codegen_->AddSlowPath(slow_path);
 
   // Avoid null check if we know obj is not null.
-- 
cgit v1.2.3-59-g8ed1b