Merge "Revert^3 "ART: Reference.getReferent intrinsic for x86 and x86_64"" am: 0a50965275
am: 690b07f42e
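
This reverts the Reference.getReferent intrinsic for x86 and x86_64:
the dedicated VisitReferenceGetReferent implementations are removed and
the intrinsic is marked UNIMPLEMENTED_INTRINSIC again, the
GenerateCalleeMethodStaticOrDirectCall helper is folded back into
GenerateStaticOrDirectCall, and the GetReferenceSlowFlagOffset() /
GetReferenceDisableFlagOffset() accessors are dropped from CodeGenerator.
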
Change-Id: If69c90a8ebf0cd36d6e85aea579bb22f87a83a41
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index c117777..c918ee6 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -1403,20 +1403,6 @@
locations->AddTemp(Location::RequiresRegister());
}
-uint32_t CodeGenerator::GetReferenceSlowFlagOffset() const {
- ScopedObjectAccess soa(Thread::Current());
- mirror::Class* klass = mirror::Reference::GetJavaLangRefReference();
- DCHECK(klass->IsInitialized());
- return klass->GetSlowPathFlagOffset().Uint32Value();
-}
-
-uint32_t CodeGenerator::GetReferenceDisableFlagOffset() const {
- ScopedObjectAccess soa(Thread::Current());
- mirror::Class* klass = mirror::Reference::GetJavaLangRefReference();
- DCHECK(klass->IsInitialized());
- return klass->GetDisableIntrinsicFlagOffset().Uint32Value();
-}
-
void CodeGenerator::EmitJitRoots(uint8_t* code,
Handle<mirror::ObjectArray<mirror::Object>> roots,
const uint8_t* roots_data) {
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index fc2f4c1..c9ba5c3 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -573,9 +573,6 @@
virtual void GenerateNop() = 0;
- uint32_t GetReferenceSlowFlagOffset() const;
- uint32_t GetReferenceDisableFlagOffset() const;
-
static QuickEntrypointEnum GetArrayAllocationEntrypoint(Handle<mirror::Class> array_klass);
protected:
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 658049a..b8465cd 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -4521,18 +4521,16 @@
// save one load. However, since this is just an intrinsic slow path, we prefer this
// simple and more robust approach rather than trying to determine if that's the case.
SlowPathCode* slow_path = GetCurrentSlowPath();
- if (slow_path != nullptr) {
- if (slow_path->IsCoreRegisterSaved(location.AsRegister<Register>())) {
- int stack_offset = slow_path->GetStackOffsetOfCoreRegister(location.AsRegister<Register>());
- __ movl(temp, Address(ESP, stack_offset));
- return temp;
- }
+ DCHECK(slow_path != nullptr); // For intrinsified invokes, the call is emitted on the slow path.
+ if (slow_path->IsCoreRegisterSaved(location.AsRegister<Register>())) {
+ int stack_offset = slow_path->GetStackOffsetOfCoreRegister(location.AsRegister<Register>());
+ __ movl(temp, Address(ESP, stack_offset));
+ return temp;
}
return location.AsRegister<Register>();
}
-Location CodeGeneratorX86::GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
- Location temp) {
+void CodeGeneratorX86::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
switch (invoke->GetMethodLoadKind()) {
case HInvokeStaticOrDirect::MethodLoadKind::kStringInit: {
@@ -4590,11 +4588,6 @@
break;
}
}
- return callee_method;
-}
-
-void CodeGeneratorX86::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
- Location callee_method = GenerateCalleeMethodStaticOrDirectCall(invoke, temp);
switch (invoke->GetCodePtrLocation()) {
case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 21c527e..8130bd9 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -408,7 +408,6 @@
HInvokeStaticOrDirect* invoke) OVERRIDE;
// Generate a call to a static or direct method.
- Location GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp);
void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
// Generate a call to a virtual method.
void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 1d49afc..8dde298 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -977,9 +977,10 @@
return desired_dispatch_info;
}
-Location CodeGeneratorX86_64::GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
- Location temp) {
+void CodeGeneratorX86_64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
+ Location temp) {
// All registers are assumed to be correctly set up.
+
Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
switch (invoke->GetMethodLoadKind()) {
case HInvokeStaticOrDirect::MethodLoadKind::kStringInit: {
@@ -1032,13 +1033,6 @@
break;
}
}
- return callee_method;
-}
-
-void CodeGeneratorX86_64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
- Location temp) {
- // All registers are assumed to be correctly set up.
- Location callee_method = GenerateCalleeMethodStaticOrDirectCall(invoke, temp);
switch (invoke->GetCodePtrLocation()) {
case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 3039e05..2547981 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -404,7 +404,6 @@
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
HInvokeStaticOrDirect* invoke) OVERRIDE;
- Location GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp);
void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index fa843a6..a9da15d 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -2819,65 +2819,6 @@
GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
}
-void IntrinsicLocationsBuilderX86::VisitReferenceGetReferent(HInvoke* invoke) {
- if (kEmitCompilerReadBarrier) {
- // Do not intrinsify this call with the read barrier configuration.
- return;
- }
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnSlowPath,
- kIntrinsified);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetOut(Location::SameAsFirstInput());
- locations->AddTemp(Location::RequiresRegister());
-}
-
-void IntrinsicCodeGeneratorX86::VisitReferenceGetReferent(HInvoke* invoke) {
- DCHECK(!kEmitCompilerReadBarrier);
- LocationSummary* locations = invoke->GetLocations();
- X86Assembler* assembler = GetAssembler();
-
- Register obj = locations->InAt(0).AsRegister<Register>();
- Register out = locations->Out().AsRegister<Register>();
-
- SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke);
- codegen_->AddSlowPath(slow_path);
-
- // Load ArtMethod first.
- HInvokeStaticOrDirect* invoke_direct = invoke->AsInvokeStaticOrDirect();
- DCHECK(invoke_direct != nullptr);
- Location temp_loc = codegen_->GenerateCalleeMethodStaticOrDirectCall(
- invoke_direct, locations->GetTemp(0));
- DCHECK(temp_loc.Equals(locations->GetTemp(0)));
- Register temp = temp_loc.AsRegister<Register>();
-
- // Now get declaring class.
- __ movl(temp, Address(temp, ArtMethod::DeclaringClassOffset().Int32Value()));
-
- uint32_t slow_path_flag_offset = codegen_->GetReferenceSlowFlagOffset();
- uint32_t disable_flag_offset = codegen_->GetReferenceDisableFlagOffset();
- DCHECK_NE(slow_path_flag_offset, 0u);
- DCHECK_NE(disable_flag_offset, 0u);
- DCHECK_NE(slow_path_flag_offset, disable_flag_offset);
-
- // Check static flags that prevent us from using the intrinsic.
- if (slow_path_flag_offset == disable_flag_offset + 1) {
- __ cmpw(Address(temp, disable_flag_offset), Immediate(0));
- __ j(kNotEqual, slow_path->GetEntryLabel());
- } else {
- __ cmpb(Address(temp, disable_flag_offset), Immediate(0));
- __ j(kNotEqual, slow_path->GetEntryLabel());
- __ cmpb(Address(temp, slow_path_flag_offset), Immediate(0));
- __ j(kNotEqual, slow_path->GetEntryLabel());
- }
-
- // Fast path.
- __ movl(out, Address(obj, mirror::Reference::ReferentOffset().Int32Value()));
- codegen_->MaybeRecordImplicitNullCheck(invoke);
- __ MaybeUnpoisonHeapReference(out);
- __ Bind(slow_path->GetExitLabel());
-}
-
static bool IsSameInput(HInstruction* instruction, size_t input0, size_t input1) {
return instruction->InputAt(input0) == instruction->InputAt(input1);
}
@@ -3429,6 +3370,7 @@
UNIMPLEMENTED_INTRINSIC(X86, MathRoundDouble)
+UNIMPLEMENTED_INTRINSIC(X86, ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(X86, FloatIsInfinite)
UNIMPLEMENTED_INTRINSIC(X86, DoubleIsInfinite)
UNIMPLEMENTED_INTRINSIC(X86, IntegerHighestOneBit)
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 4f4592b..8100645 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -2959,65 +2959,6 @@
GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
}
-void IntrinsicLocationsBuilderX86_64::VisitReferenceGetReferent(HInvoke* invoke) {
- if (kEmitCompilerReadBarrier) {
- // Do not intrinsify this call with the read barrier configuration.
- return;
- }
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnSlowPath,
- kIntrinsified);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetOut(Location::SameAsFirstInput());
- locations->AddTemp(Location::RequiresRegister());
-}
-
-void IntrinsicCodeGeneratorX86_64::VisitReferenceGetReferent(HInvoke* invoke) {
- DCHECK(!kEmitCompilerReadBarrier);
- LocationSummary* locations = invoke->GetLocations();
- X86_64Assembler* assembler = GetAssembler();
-
- CpuRegister obj = locations->InAt(0).AsRegister<CpuRegister>();
- CpuRegister out = locations->Out().AsRegister<CpuRegister>();
-
- SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
- codegen_->AddSlowPath(slow_path);
-
- // Load ArtMethod first.
- HInvokeStaticOrDirect* invoke_direct = invoke->AsInvokeStaticOrDirect();
- DCHECK(invoke_direct != nullptr);
- Location temp_loc = codegen_->GenerateCalleeMethodStaticOrDirectCall(
- invoke_direct, locations->GetTemp(0));
- DCHECK(temp_loc.Equals(locations->GetTemp(0)));
- CpuRegister temp = temp_loc.AsRegister<CpuRegister>();
-
- // Now get declaring class.
- __ movl(temp, Address(temp, ArtMethod::DeclaringClassOffset().Int32Value()));
-
- uint32_t slow_path_flag_offset = codegen_->GetReferenceSlowFlagOffset();
- uint32_t disable_flag_offset = codegen_->GetReferenceDisableFlagOffset();
- DCHECK_NE(slow_path_flag_offset, 0u);
- DCHECK_NE(disable_flag_offset, 0u);
- DCHECK_NE(slow_path_flag_offset, disable_flag_offset);
-
- // Check static flags that prevent us from using the intrinsic.
- if (slow_path_flag_offset == disable_flag_offset + 1) {
- __ cmpw(Address(temp, disable_flag_offset), Immediate(0));
- __ j(kNotEqual, slow_path->GetEntryLabel());
- } else {
- __ cmpb(Address(temp, disable_flag_offset), Immediate(0));
- __ j(kNotEqual, slow_path->GetEntryLabel());
- __ cmpb(Address(temp, slow_path_flag_offset), Immediate(0));
- __ j(kNotEqual, slow_path->GetEntryLabel());
- }
-
- // Fast path.
- __ movl(out, Address(obj, mirror::Reference::ReferentOffset().Int32Value()));
- codegen_->MaybeRecordImplicitNullCheck(invoke);
- __ MaybeUnpoisonHeapReference(out);
- __ Bind(slow_path->GetExitLabel());
-}
-
void IntrinsicLocationsBuilderX86_64::VisitIntegerValueOf(HInvoke* invoke) {
InvokeRuntimeCallingConvention calling_convention;
IntrinsicVisitor::ComputeIntegerValueOfLocations(
@@ -3106,6 +3047,7 @@
__ Bind(&done);
}
+UNIMPLEMENTED_INTRINSIC(X86_64, ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(X86_64, FloatIsInfinite)
UNIMPLEMENTED_INTRINSIC(X86_64, DoubleIsInfinite)