Diffstat (limited to 'compiler/optimizing')
 compiler/optimizing/code_generator.cc        | 14
 compiler/optimizing/code_generator.h         |  3
 compiler/optimizing/code_generator_x86.cc    | 19
 compiler/optimizing/code_generator_x86.h     |  1
 compiler/optimizing/code_generator_x86_64.cc | 12
 compiler/optimizing/code_generator_x86_64.h  |  1
 compiler/optimizing/intrinsics_x86.cc        | 59
 compiler/optimizing/intrinsics_x86_64.cc     | 59
 8 files changed, 11 insertions(+), 157 deletions(-)
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 3983661143..e7fa4e472b 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -1298,18 +1298,4 @@ void CodeGenerator::CreateSystemArrayCopyLocationSummary(HInvoke* invoke) {
   locations->AddTemp(Location::RequiresRegister());
 }
 
-uint32_t CodeGenerator::GetReferenceSlowFlagOffset() const {
-  ScopedObjectAccess soa(Thread::Current());
-  mirror::Class* klass = mirror::Reference::GetJavaLangRefReference();
-  DCHECK(klass->IsInitialized());
-  return klass->GetSlowPathFlagOffset().Uint32Value();
-}
-
-uint32_t CodeGenerator::GetReferenceDisableFlagOffset() const {
-  ScopedObjectAccess soa(Thread::Current());
-  mirror::Class* klass = mirror::Reference::GetJavaLangRefReference();
-  DCHECK(klass->IsInitialized());
-  return klass->GetDisableIntrinsicFlagOffset().Uint32Value();
-}
-
 }  // namespace art
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 6f742d924c..d69c41055b 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -464,9 +464,6 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
 
   virtual void GenerateNop() = 0;
 
-  uint32_t GetReferenceSlowFlagOffset() const;
-  uint32_t GetReferenceDisableFlagOffset() const;
-
  protected:
   // Method patch info used for recording locations of required linker patches and
   // target methods. The target method can be used for various purposes, whether for
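Note: the two CodeGenerator helpers deleted above existed only to serve the x86/x86-64 ReferenceGetReferent intrinsics removed at the end of this change; with those gone, the offsets are dead code. They returned the byte offsets of two flag fields inside the java.lang.ref.Reference class object, which the intrinsics then tested straight off the class pointer. A minimal stand-in sketch of what those offsets describe (field names and surrounding layout are hypothetical, not mirror::Class's real layout):

// Sketch only: models the two adjacent byte flags whose offsets the removed
// helpers returned. Layout and names are hypothetical stand-ins.
#include <cassert>
#include <cstddef>
#include <cstdint>

struct ReferenceClassModel {
  uint8_t other_fields[64];        // stand-in for the rest of the class object
  uint8_t disable_intrinsic_flag;  // ~ GetDisableIntrinsicFlagOffset()
  uint8_t slow_path_flag;          // ~ GetSlowPathFlagOffset()
};

uint32_t GetReferenceDisableFlagOffset() {
  return static_cast<uint32_t>(offsetof(ReferenceClassModel, disable_intrinsic_flag));
}

uint32_t GetReferenceSlowFlagOffset() {
  return static_cast<uint32_t>(offsetof(ReferenceClassModel, slow_path_flag));
}

int main() {
  // The removed x86 intrinsics special-cased exactly this adjacency
  // (see the cmpw path deleted from intrinsics_x86.cc below).
  assert(GetReferenceSlowFlagOffset() == GetReferenceDisableFlagOffset() + 1);
  return 0;
}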
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 858a79c9c9..6dc480bbee 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -4308,18 +4308,16 @@ Register CodeGeneratorX86::GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOr
   // save one load. However, since this is just an intrinsic slow path we prefer this
   // simple and more robust approach rather that trying to determine if that's the case.
   SlowPathCode* slow_path = GetCurrentSlowPath();
-  if (slow_path != nullptr) {
-    if (slow_path->IsCoreRegisterSaved(location.AsRegister<Register>())) {
-      int stack_offset = slow_path->GetStackOffsetOfCoreRegister(location.AsRegister<Register>());
-      __ movl(temp, Address(ESP, stack_offset));
-      return temp;
-    }
+  DCHECK(slow_path != nullptr);  // For intrinsified invokes the call is emitted on the slow path.
+  if (slow_path->IsCoreRegisterSaved(location.AsRegister<Register>())) {
+    int stack_offset = slow_path->GetStackOffsetOfCoreRegister(location.AsRegister<Register>());
+    __ movl(temp, Address(ESP, stack_offset));
+    return temp;
   }
   return location.AsRegister<Register>();
 }
 
-Location CodeGeneratorX86::GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
-                                                                  Location temp) {
+void CodeGeneratorX86::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
   Location callee_method = temp;  // For all kinds except kRecursive, callee will be in temp.
   switch (invoke->GetMethodLoadKind()) {
     case HInvokeStaticOrDirect::MethodLoadKind::kStringInit:
@@ -4368,11 +4366,6 @@ Location CodeGeneratorX86::GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticO
       break;
     }
   }
-  return callee_method;
-}
-
-void CodeGeneratorX86::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
-  Location callee_method = GenerateCalleeMethodStaticOrDirectCall(invoke, temp);
 
   switch (invoke->GetCodePtrLocation()) {
     case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 98dc8ca280..fe7d3ed85c 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -398,7 +398,6 @@ class CodeGeneratorX86 : public CodeGenerator {
       MethodReference target_method) OVERRIDE;
 
   // Generate a call to a static or direct method.
-  Location GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp);
   void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
   // Generate a call to a virtual method.
   void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
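Note: the x86 change above folds GenerateCalleeMethodStaticOrDirectCall back into GenerateStaticOrDirectCall; its only external caller was the VisitReferenceGetReferent implementation removed below, so the split no longer pays its way. A compilable toy model of the merged two-switch shape (enums and bodies are simplified stand-ins, not ART's real dispatch kinds):

#include <cstdio>

// Stand-ins for HInvokeStaticOrDirect's dispatch enums; only kinds visible
// in this diff are modeled.
enum class MethodLoadKind { kStringInit, kRecursive, kOther };
enum class CodePtrLocation { kCallSelf, kOther };

struct Invoke {
  MethodLoadKind method_load_kind;
  CodePtrLocation code_ptr_location;
};

// Merged form: the first switch materializes the callee method (the part
// that used to be GenerateCalleeMethodStaticOrDirectCall), the second
// switch emits the call itself.
void GenerateStaticOrDirectCall(const Invoke& invoke) {
  switch (invoke.method_load_kind) {
    case MethodLoadKind::kStringInit: std::puts("load callee from StringInit entrypoint"); break;
    case MethodLoadKind::kRecursive:  std::puts("callee is the current method"); break;
    default:                          std::puts("load callee into temp"); break;
  }
  switch (invoke.code_ptr_location) {
    case CodePtrLocation::kCallSelf: std::puts("emit call to own frame entry"); break;
    default:                         std::puts("emit indirect call through the method"); break;
  }
}

int main() {
  GenerateStaticOrDirectCall({MethodLoadKind::kRecursive, CodePtrLocation::kCallSelf});
  return 0;
}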
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 80bcec27cd..96ec09c2a8 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -762,9 +762,10 @@ HInvokeStaticOrDirect::DispatchInfo CodeGeneratorX86_64::GetSupportedInvokeStati
   }
 }
 
-Location CodeGeneratorX86_64::GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
-                                                                     Location temp) {
+void CodeGeneratorX86_64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
+                                                     Location temp) {
   // All registers are assumed to be correctly set up.
+
   Location callee_method = temp;  // For all kinds except kRecursive, callee will be in temp.
   switch (invoke->GetMethodLoadKind()) {
     case HInvokeStaticOrDirect::MethodLoadKind::kStringInit:
@@ -814,13 +815,6 @@ Location CodeGeneratorX86_64::GenerateCalleeMethodStaticOrDirectCall(HInvokeStat
       break;
     }
   }
-  return callee_method;
-}
-
-void CodeGeneratorX86_64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
-                                                     Location temp) {
-  // All registers are assumed to be correctly set up.
-  Location callee_method = GenerateCalleeMethodStaticOrDirectCall(invoke, temp);
 
   switch (invoke->GetCodePtrLocation()) {
     case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 7cf12459b0..d9908bb961 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -394,7 +394,6 @@ class CodeGeneratorX86_64 : public CodeGenerator {
       const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
       MethodReference target_method) OVERRIDE;
 
-  Location GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp);
   void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
   void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
 
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 86e904acf9..4aab3e2768 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -2630,65 +2630,8 @@ void IntrinsicCodeGeneratorX86::VisitLongNumberOfTrailingZeros(HInvoke* invoke)
   GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
 }
 
-void IntrinsicLocationsBuilderX86::VisitReferenceGetReferent(HInvoke* invoke) {
-  if (kEmitCompilerReadBarrier) {
-    // Do not intrinsify this call with the read barrier configuration.
-    return;
-  }
-  LocationSummary* locations = new (arena_) LocationSummary(invoke,
-                                                            LocationSummary::kCallOnSlowPath,
-                                                            kIntrinsified);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetOut(Location::SameAsFirstInput());
-  locations->AddTemp(Location::RequiresRegister());
-}
-
-void IntrinsicCodeGeneratorX86::VisitReferenceGetReferent(HInvoke* invoke) {
-  DCHECK(!kEmitCompilerReadBarrier);
-  LocationSummary* locations = invoke->GetLocations();
-  X86Assembler* assembler = GetAssembler();
-
-  Register obj = locations->InAt(0).AsRegister<Register>();
-  Register out = locations->Out().AsRegister<Register>();
-
-  SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke);
-  codegen_->AddSlowPath(slow_path);
-
-  // Load ArtMethod first.
-  HInvokeStaticOrDirect* invoke_direct = invoke->AsInvokeStaticOrDirect();
-  DCHECK(invoke_direct != nullptr);
-  Location temp_loc = codegen_->GenerateCalleeMethodStaticOrDirectCall(
-      invoke_direct, locations->GetTemp(0));
-  DCHECK(temp_loc.Equals(locations->GetTemp(0)));
-  Register temp = temp_loc.AsRegister<Register>();
-
-  // Now get declaring class.
-  __ movl(temp, Address(temp, ArtMethod::DeclaringClassOffset().Int32Value()));
-
-  uint32_t slow_path_flag_offset = codegen_->GetReferenceSlowFlagOffset();
-  uint32_t disable_flag_offset = codegen_->GetReferenceDisableFlagOffset();
-  DCHECK_NE(slow_path_flag_offset, 0u);
-  DCHECK_NE(disable_flag_offset, 0u);
-  DCHECK_NE(slow_path_flag_offset, disable_flag_offset);
-
-  // Check static flags preventing us for using intrinsic.
-  if (slow_path_flag_offset == disable_flag_offset + 1) {
-    __ cmpw(Address(temp, disable_flag_offset), Immediate(0));
-    __ j(kNotEqual, slow_path->GetEntryLabel());
-  } else {
-    __ cmpb(Address(temp, disable_flag_offset), Immediate(0));
-    __ j(kNotEqual, slow_path->GetEntryLabel());
-    __ cmpb(Address(temp, slow_path_flag_offset), Immediate(0));
-    __ j(kNotEqual, slow_path->GetEntryLabel());
-  }
-
-  // Fast path.
-  __ movl(out, Address(obj, mirror::Reference::ReferentOffset().Int32Value()));
-  codegen_->MaybeRecordImplicitNullCheck(invoke);
-  __ Bind(slow_path->GetExitLabel());
-}
-
 UNIMPLEMENTED_INTRINSIC(X86, MathRoundDouble)
+UNIMPLEMENTED_INTRINSIC(X86, ReferenceGetReferent)
 UNIMPLEMENTED_INTRINSIC(X86, SystemArrayCopy)
 UNIMPLEMENTED_INTRINSIC(X86, FloatIsInfinite)
 UNIMPLEMENTED_INTRINSIC(X86, DoubleIsInfinite)
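Note: the deleted fast path above contains one trick worth noting: when the two byte-wide flags sit at adjacent offsets (slow_path_flag_offset == disable_flag_offset + 1), a single 16-bit cmpw tests both at once, and only otherwise does it fall back to two cmpb checks. A self-contained C++ sketch of the same idea (layout hypothetical, as before):

#include <cassert>
#include <cstdint>
#include <cstring>

// Two adjacent byte-sized flags, as in the cmpw branch of the removed code.
struct Flags {
  uint8_t disable_intrinsic;  // at disable_flag_offset
  uint8_t slow_path_enabled;  // at disable_flag_offset + 1
};

// One 16-bit load covers both bytes; a nonzero result means at least one
// flag is set, so the intrinsic must branch to its slow path. The check is
// endianness-agnostic because only zero/nonzero matters.
bool MustUseSlowPath(const Flags& f) {
  uint16_t both;
  std::memcpy(&both, &f, sizeof(both));
  return both != 0;
}

int main() {
  Flags f{0, 0};
  assert(!MustUseSlowPath(f));
  f.slow_path_enabled = 1;
  assert(MustUseSlowPath(f));
  return 0;
}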
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 1a7cea07f8..1d32dc7bc5 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -2718,64 +2718,7 @@ void IntrinsicCodeGeneratorX86_64::VisitLongNumberOfTrailingZeros(HInvoke* invok
   GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
 }
 
-void IntrinsicLocationsBuilderX86_64::VisitReferenceGetReferent(HInvoke* invoke) {
-  if (kEmitCompilerReadBarrier) {
-    // Do not intrinsify this call with the read barrier configuration.
-    return;
-  }
-  LocationSummary* locations = new (arena_) LocationSummary(invoke,
-                                                            LocationSummary::kCallOnSlowPath,
-                                                            kIntrinsified);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetOut(Location::SameAsFirstInput());
-  locations->AddTemp(Location::RequiresRegister());
-}
-
-void IntrinsicCodeGeneratorX86_64::VisitReferenceGetReferent(HInvoke* invoke) {
-  DCHECK(!kEmitCompilerReadBarrier);
-  LocationSummary* locations = invoke->GetLocations();
-  X86_64Assembler* assembler = GetAssembler();
-
-  CpuRegister obj = locations->InAt(0).AsRegister<CpuRegister>();
-  CpuRegister out = locations->Out().AsRegister<CpuRegister>();
-
-  SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
-  codegen_->AddSlowPath(slow_path);
-
-  // Load ArtMethod first.
-  HInvokeStaticOrDirect* invoke_direct = invoke->AsInvokeStaticOrDirect();
-  DCHECK(invoke_direct != nullptr);
-  Location temp_loc = codegen_->GenerateCalleeMethodStaticOrDirectCall(
-      invoke_direct, locations->GetTemp(0));
-  DCHECK(temp_loc.Equals(locations->GetTemp(0)));
-  CpuRegister temp = temp_loc.AsRegister<CpuRegister>();
-
-  // Now get declaring class.
-  __ movl(temp, Address(temp, ArtMethod::DeclaringClassOffset().Int32Value()));
-
-  uint32_t slow_path_flag_offset = codegen_->GetReferenceSlowFlagOffset();
-  uint32_t disable_flag_offset = codegen_->GetReferenceDisableFlagOffset();
-  DCHECK_NE(slow_path_flag_offset, 0u);
-  DCHECK_NE(disable_flag_offset, 0u);
-  DCHECK_NE(slow_path_flag_offset, disable_flag_offset);
-
-  // Check static flags preventing us for using intrinsic.
-  if (slow_path_flag_offset == disable_flag_offset + 1) {
-    __ cmpw(Address(temp, disable_flag_offset), Immediate(0));
-    __ j(kNotEqual, slow_path->GetEntryLabel());
-  } else {
-    __ cmpb(Address(temp, disable_flag_offset), Immediate(0));
-    __ j(kNotEqual, slow_path->GetEntryLabel());
-    __ cmpb(Address(temp, slow_path_flag_offset), Immediate(0));
-    __ j(kNotEqual, slow_path->GetEntryLabel());
-  }
-
-  // Fast path.
-  __ movl(out, Address(obj, mirror::Reference::ReferentOffset().Int32Value()));
-  codegen_->MaybeRecordImplicitNullCheck(invoke);
-  __ Bind(slow_path->GetExitLabel());
-}
-
+UNIMPLEMENTED_INTRINSIC(X86_64, ReferenceGetReferent)
 UNIMPLEMENTED_INTRINSIC(X86_64, FloatIsInfinite)
 UNIMPLEMENTED_INTRINSIC(X86_64, DoubleIsInfinite)
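Note: UNIMPLEMENTED_INTRINSIC stamps out empty locations-builder and code-generator visitors, so a call recognized as ReferenceGetReferent is simply left as a regular invoke of the runtime method. A simplified model of the macro (the real one in the ART sources differs in details such as unused-parameter annotations):

#include <cstdio>

struct HInvoke {};
struct IntrinsicLocationsBuilderX86_64 { void VisitReferenceGetReferent(HInvoke*); };
struct IntrinsicCodeGeneratorX86_64 { void VisitReferenceGetReferent(HInvoke*); };

// Simplified model: empty visitor bodies mean no special locations are set
// up and no code is emitted, so the invoke falls back to a normal call.
#define UNIMPLEMENTED_INTRINSIC(Arch, Name)                      \
  void IntrinsicLocationsBuilder##Arch::Visit##Name(HInvoke*) {} \
  void IntrinsicCodeGenerator##Arch::Visit##Name(HInvoke*) {}

UNIMPLEMENTED_INTRINSIC(X86_64, ReferenceGetReferent)

int main() {
  HInvoke invoke;
  IntrinsicLocationsBuilderX86_64().VisitReferenceGetReferent(&invoke);  // no-op
  IntrinsicCodeGeneratorX86_64().VisitReferenceGetReferent(&invoke);     // no-op
  std::puts("ReferenceGetReferent left as a regular call");
  return 0;
}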