Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc     |   2
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc  |   2
-rw-r--r--  compiler/optimizing/code_generator_riscv64.cc   | 166
-rw-r--r--  compiler/optimizing/code_generator_riscv64.h    |  60
-rw-r--r--  compiler/optimizing/code_generator_x86.cc       |   2
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc    |   2
-rw-r--r--  compiler/optimizing/intrinsics_arm64.cc         |  16
-rw-r--r--  compiler/optimizing/intrinsics_arm_vixl.cc      |  16
-rw-r--r--  compiler/optimizing/intrinsics_riscv64.cc       | 877
-rw-r--r--  compiler/optimizing/intrinsics_x86.cc           |   2
-rw-r--r--  compiler/optimizing/intrinsics_x86_64.cc        |  18
11 files changed, 1036 insertions, 127 deletions
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 997d7a48c0..cf5e9d23d3 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -5212,7 +5212,7 @@ void CodeGeneratorARM64::LoadBootImageAddress(vixl::aarch64::Register reg,
void CodeGeneratorARM64::LoadTypeForBootImageIntrinsic(vixl::aarch64::Register reg,
TypeReference target_type) {
- // Load the class the same way as for HLoadClass::LoadKind::kBootImageLinkTimePcRelative.
+ // Load the type the same way as for HLoadClass::LoadKind::kBootImageLinkTimePcRelative.
DCHECK(GetCompilerOptions().IsBootImage() || GetCompilerOptions().IsBootImageExtension());
// Add ADRP with its PC-relative type patch.
vixl::aarch64::Label* adrp_label =
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index b9496ebbe0..76ed33563c 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -9748,7 +9748,7 @@ void CodeGeneratorARMVIXL::LoadBootImageAddress(vixl32::Register reg,
void CodeGeneratorARMVIXL::LoadTypeForBootImageIntrinsic(vixl::aarch32::Register reg,
TypeReference target_type) {
- // Load the class the same way as for HLoadClass::LoadKind::kBootImageLinkTimePcRelative.
+ // Load the type the same way as for HLoadClass::LoadKind::kBootImageLinkTimePcRelative.
DCHECK(GetCompilerOptions().IsBootImage() || GetCompilerOptions().IsBootImageExtension());
PcRelativePatchInfo* labels =
NewBootImageTypePatch(*target_type.dex_file, target_type.TypeIndex());
diff --git a/compiler/optimizing/code_generator_riscv64.cc b/compiler/optimizing/code_generator_riscv64.cc
index 899dbca295..14e284b463 100644
--- a/compiler/optimizing/code_generator_riscv64.cc
+++ b/compiler/optimizing/code_generator_riscv64.cc
@@ -22,8 +22,11 @@
#include "arch/riscv64/registers_riscv64.h"
#include "base/arena_containers.h"
#include "base/macros.h"
+#include "class_root-inl.h"
#include "code_generator_utils.h"
#include "dwarf/register.h"
+#include "gc/heap.h"
+#include "gc/space/image_space.h"
#include "heap_poisoning.h"
#include "intrinsics_list.h"
#include "intrinsics_riscv64.h"
@@ -31,6 +34,8 @@
#include "linker/linker_patch.h"
#include "mirror/class-inl.h"
#include "optimizing/nodes.h"
+#include "runtime.h"
+#include "scoped_thread_state_change-inl.h"
#include "stack_map_stream.h"
#include "trace.h"
#include "utils/label.h"
@@ -959,6 +964,49 @@ void InstructionCodeGeneratorRISCV64::Store(
}
}
+void InstructionCodeGeneratorRISCV64::StoreSeqCst(Location value,
+ XRegister rs1,
+ int32_t offset,
+ DataType::Type type,
+ HInstruction* instruction) {
+ if (DataType::Size(type) >= 4u) {
+ // Use AMOSWAP for 32-bit and 64-bit data types.
+ ScratchRegisterScope srs(GetAssembler());
+ XRegister swap_src = kNoXRegister;
+ if (kPoisonHeapReferences && type == DataType::Type::kReference && !value.IsConstant()) {
+ swap_src = srs.AllocateXRegister();
+ __ Mv(swap_src, value.AsRegister<XRegister>());
+ codegen_->PoisonHeapReference(swap_src);
+ } else if (DataType::IsFloatingPointType(type) && !value.IsConstant()) {
+ swap_src = srs.AllocateXRegister();
+ FMvX(swap_src, value.AsFpuRegister<FRegister>(), type);
+ } else {
+ swap_src = InputXRegisterOrZero(value);
+ }
+ XRegister addr = rs1;
+ if (offset != 0) {
+ addr = srs.AllocateXRegister();
+ __ AddConst64(addr, rs1, offset);
+ }
+ if (DataType::Is64BitType(type)) {
+ __ AmoSwapD(Zero, swap_src, addr, AqRl::kRelease);
+ } else {
+ __ AmoSwapW(Zero, swap_src, addr, AqRl::kRelease);
+ }
+ if (instruction != nullptr) {
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
+ } else {
+ // Use fences for smaller data types.
+ codegen_->GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
+ Store(value, rs1, offset, type);
+ if (instruction != nullptr) {
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
+ codegen_->GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
+ }
+}
+
void InstructionCodeGeneratorRISCV64::ShNAdd(
XRegister rd, XRegister rs1, XRegister rs2, DataType::Type type) {
switch (type) {
@@ -1831,13 +1879,19 @@ void CodeGeneratorRISCV64::GenerateReferenceLoadWithBakerReadBarrier(HInstructio
XRegister reg = ref.AsRegister<XRegister>();
if (index.IsValid()) {
- DCHECK(instruction->IsArrayGet());
DCHECK(!needs_null_check);
DCHECK(index.IsRegister());
- // /* HeapReference<Object> */ ref = *(obj + index * element_size + offset)
DataType::Type type = DataType::Type::kReference;
DCHECK_EQ(type, instruction->GetType());
- instruction_visitor_.ShNAdd(reg, index.AsRegister<XRegister>(), obj, type);
+ if (instruction->IsArrayGet()) {
+ // /* HeapReference<Object> */ ref = *(obj + index * element_size + offset)
+ instruction_visitor_.ShNAdd(reg, index.AsRegister<XRegister>(), obj, type);
+ } else {
+ // /* HeapReference<Object> */ ref = *(obj + index + offset)
+ DCHECK(instruction->IsInvoke());
+ DCHECK(instruction->GetLocations()->Intrinsified());
+ __ Add(reg, index.AsRegister<XRegister>(), obj);
+ }
__ Loadwu(reg, reg, offset);
} else {
// /* HeapReference<Object> */ ref = *(obj + offset)
@@ -2399,35 +2453,7 @@ void InstructionCodeGeneratorRISCV64::HandleFieldSet(HInstruction* instruction,
}
if (is_volatile) {
- if (DataType::Size(type) >= 4u) {
- // Use AMOSWAP for 32-bit and 64-bit data types.
- ScratchRegisterScope srs(GetAssembler());
- XRegister swap_src = kNoXRegister;
- if (kPoisonHeapReferences && type == DataType::Type::kReference && !value.IsConstant()) {
- swap_src = srs.AllocateXRegister();
- __ Mv(swap_src, value.AsRegister<XRegister>());
- codegen_->PoisonHeapReference(swap_src);
- } else if (DataType::IsFloatingPointType(type) && !value.IsConstant()) {
- swap_src = srs.AllocateXRegister();
- FMvX(swap_src, value.AsFpuRegister<FRegister>(), type);
- } else {
- swap_src = InputXRegisterOrZero(value);
- }
- XRegister addr = srs.AllocateXRegister();
- __ AddConst64(addr, obj, offset);
- if (DataType::Is64BitType(type)) {
- __ AmoSwapD(Zero, swap_src, addr, AqRl::kRelease);
- } else {
- __ AmoSwapW(Zero, swap_src, addr, AqRl::kRelease);
- }
- codegen_->MaybeRecordImplicitNullCheck(instruction);
- } else {
- // Use fences for smaller data types.
- codegen_->GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
- Store(value, obj, offset, type);
- codegen_->MaybeRecordImplicitNullCheck(instruction);
- codegen_->GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
- }
+ StoreSeqCst(value, obj, offset, type, instruction);
} else {
Store(value, obj, offset, type);
codegen_->MaybeRecordImplicitNullCheck(instruction);
@@ -2574,7 +2600,7 @@ void InstructionCodeGeneratorRISCV64::GenerateMethodEntryExitHook(HInstruction*
// Check if there is place in the buffer to store a new entry, if no, take the slow path.
int32_t trace_buffer_index_offset =
- Thread::TraceBufferIndexOffset<kArm64PointerSize>().Int32Value();
+ Thread::TraceBufferIndexOffset<kRiscv64PointerSize>().Int32Value();
__ Loadd(tmp, TR, trace_buffer_index_offset);
__ Addi(tmp, tmp, -dchecked_integral_cast<int32_t>(kNumEntriesForWallClock));
__ Bltz(tmp, slow_path->GetEntryLabel());
@@ -2588,7 +2614,7 @@ void InstructionCodeGeneratorRISCV64::GenerateMethodEntryExitHook(HInstruction*
// Calculate the entry address in the buffer.
// /*addr*/ tmp = TR->GetMethodTraceBuffer() + sizeof(void*) * /*index*/ tmp;
- __ Loadd(tmp2, TR, Thread::TraceBufferPtrOffset<kArm64PointerSize>().SizeValue());
+ __ Loadd(tmp2, TR, Thread::TraceBufferPtrOffset<kRiscv64PointerSize>().SizeValue());
__ Sh3Add(tmp, tmp, tmp2);
// Record method pointer and trace action.
@@ -4066,10 +4092,17 @@ void InstructionCodeGeneratorRISCV64::VisitInvokeVirtual(HInvokeVirtual* instruc
}
void LocationsBuilderRISCV64::VisitInvokePolymorphic(HInvokePolymorphic* instruction) {
+ IntrinsicLocationsBuilderRISCV64 intrinsic(GetGraph()->GetAllocator(), codegen_);
+ if (intrinsic.TryDispatch(instruction)) {
+ return;
+ }
HandleInvoke(instruction);
}
void InstructionCodeGeneratorRISCV64::VisitInvokePolymorphic(HInvokePolymorphic* instruction) {
+ if (TryGenerateIntrinsicCode(instruction, codegen_)) {
+ return;
+ }
codegen_->GenerateInvokePolymorphicCall(instruction);
}
@@ -4184,12 +4217,7 @@ void InstructionCodeGeneratorRISCV64::VisitLoadClass(HLoadClass* instruction)
case HLoadClass::LoadKind::kBootImageRelRo: {
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
uint32_t boot_image_offset = codegen_->GetBootImageOffset(instruction);
- CodeGeneratorRISCV64::PcRelativePatchInfo* info_high =
- codegen_->NewBootImageRelRoPatch(boot_image_offset);
- codegen_->EmitPcRelativeAuipcPlaceholder(info_high, out);
- CodeGeneratorRISCV64::PcRelativePatchInfo* info_low =
- codegen_->NewBootImageRelRoPatch(boot_image_offset, info_high);
- codegen_->EmitPcRelativeLwuPlaceholder(info_low, out, out);
+ codegen_->LoadBootImageRelRoEntry(out, boot_image_offset);
break;
}
case HLoadClass::LoadKind::kBssEntry:
@@ -4321,12 +4349,7 @@ void InstructionCodeGeneratorRISCV64::VisitLoadString(HLoadString* instruction)
case HLoadString::LoadKind::kBootImageRelRo: {
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
uint32_t boot_image_offset = codegen_->GetBootImageOffset(instruction);
- CodeGeneratorRISCV64::PcRelativePatchInfo* info_high =
- codegen_->NewBootImageRelRoPatch(boot_image_offset);
- codegen_->EmitPcRelativeAuipcPlaceholder(info_high, out);
- CodeGeneratorRISCV64::PcRelativePatchInfo* info_low =
- codegen_->NewBootImageRelRoPatch(boot_image_offset, info_high);
- codegen_->EmitPcRelativeLwuPlaceholder(info_low, out, out);
+ codegen_->LoadBootImageRelRoEntry(out, boot_image_offset);
return;
}
case HLoadString::LoadKind::kBssEntry: {
@@ -6470,6 +6493,46 @@ void CodeGeneratorRISCV64::EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* l
DCHECK_EQ(size, linker_patches->size());
}
+void CodeGeneratorRISCV64::LoadTypeForBootImageIntrinsic(XRegister dest,
+ TypeReference target_type) {
+ // Load the type the same way as for HLoadClass::LoadKind::kBootImageLinkTimePcRelative.
+ DCHECK(GetCompilerOptions().IsBootImage() || GetCompilerOptions().IsBootImageExtension());
+ PcRelativePatchInfo* info_high =
+ NewBootImageTypePatch(*target_type.dex_file, target_type.TypeIndex());
+ EmitPcRelativeAuipcPlaceholder(info_high, dest);
+ PcRelativePatchInfo* info_low =
+ NewBootImageTypePatch(*target_type.dex_file, target_type.TypeIndex(), info_high);
+ EmitPcRelativeAddiPlaceholder(info_low, dest, dest);
+}
+
+void CodeGeneratorRISCV64::LoadBootImageRelRoEntry(XRegister dest, uint32_t boot_image_offset) {
+ PcRelativePatchInfo* info_high = NewBootImageRelRoPatch(boot_image_offset);
+ EmitPcRelativeAuipcPlaceholder(info_high, dest);
+ PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_offset, info_high);
+ // Note: Boot image is in the low 4GiB and the entry is always 32-bit, so emit a 32-bit load.
+ EmitPcRelativeLwuPlaceholder(info_low, dest, dest);
+}
+
+void CodeGeneratorRISCV64::LoadClassRootForIntrinsic(XRegister dest, ClassRoot class_root) {
+ if (GetCompilerOptions().IsBootImage()) {
+ ScopedObjectAccess soa(Thread::Current());
+ ObjPtr<mirror::Class> klass = GetClassRoot(class_root);
+ TypeReference target_type(&klass->GetDexFile(), klass->GetDexTypeIndex());
+ LoadTypeForBootImageIntrinsic(dest, target_type);
+ } else {
+ uint32_t boot_image_offset = GetBootImageOffset(class_root);
+ if (GetCompilerOptions().GetCompilePic()) {
+ LoadBootImageRelRoEntry(dest, boot_image_offset);
+ } else {
+ DCHECK(GetCompilerOptions().IsJitCompiler());
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ DCHECK(!heap->GetBootImageSpaces().empty());
+ const uint8_t* address = heap->GetBootImageSpaces()[0]->Begin() + boot_image_offset;
+ __ Loadwu(dest, DeduplicateBootImageAddressLiteral(reinterpret_cast<uintptr_t>(address)));
+ }
+ }
+}
+
void CodeGeneratorRISCV64::LoadMethod(MethodLoadKind load_kind, Location temp, HInvoke* invoke) {
switch (load_kind) {
case MethodLoadKind::kBootImageLinkTimePcRelative: {
@@ -6485,12 +6548,7 @@ void CodeGeneratorRISCV64::LoadMethod(MethodLoadKind load_kind, Location temp, H
}
case MethodLoadKind::kBootImageRelRo: {
uint32_t boot_image_offset = GetBootImageOffset(invoke);
- PcRelativePatchInfo* info_high = NewBootImageRelRoPatch(boot_image_offset);
- EmitPcRelativeAuipcPlaceholder(info_high, temp.AsRegister<XRegister>());
- PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_offset, info_high);
- // Note: Boot image is in the low 4GiB and the entry is 32-bit, so emit a 32-bit load.
- EmitPcRelativeLwuPlaceholder(
- info_low, temp.AsRegister<XRegister>(), temp.AsRegister<XRegister>());
+ LoadBootImageRelRoEntry(temp.AsRegister<XRegister>(), boot_image_offset);
break;
}
case MethodLoadKind::kBssEntry: {
@@ -6700,13 +6758,13 @@ void CodeGeneratorRISCV64::UnpoisonHeapReference(XRegister reg) {
__ ZextW(reg, reg); // Zero-extend the 32-bit ref.
}
-inline void CodeGeneratorRISCV64::MaybePoisonHeapReference(XRegister reg) {
+void CodeGeneratorRISCV64::MaybePoisonHeapReference(XRegister reg) {
if (kPoisonHeapReferences) {
PoisonHeapReference(reg);
}
}
-inline void CodeGeneratorRISCV64::MaybeUnpoisonHeapReference(XRegister reg) {
+void CodeGeneratorRISCV64::MaybeUnpoisonHeapReference(XRegister reg) {
if (kPoisonHeapReferences) {
UnpoisonHeapReference(reg);
}
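// A minimal standalone sketch (illustrative only, not part of the patch above): the new
// StoreSeqCst() helper factored out of HandleFieldSet() implements a sequentially consistent
// store, which the hunks above realize either as an AMOSWAP with release ordering (32/64-bit
// values, old value discarded to Zero) or as a fence / plain store / fence sequence for the
// narrower types. In portable C++ the contract being implemented is simply a seq_cst atomic store.
#include <atomic>
#include <cstdint>

std::atomic<int64_t> volatile_field{0};

void StoreSeqCstEquivalent(int64_t value) {
  // Same ordering guarantee that HandleFieldSet() now requests for volatile fields.
  volatile_field.store(value, std::memory_order_seq_cst);
}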
diff --git a/compiler/optimizing/code_generator_riscv64.h b/compiler/optimizing/code_generator_riscv64.h
index a2bba17229..7076af05c1 100644
--- a/compiler/optimizing/code_generator_riscv64.h
+++ b/compiler/optimizing/code_generator_riscv64.h
@@ -189,8 +189,6 @@ static constexpr size_t kRuntimeParameterFpuRegistersLength =
V(VarHandleCompareAndExchangeAcquire) \
V(VarHandleCompareAndExchangeRelease) \
V(VarHandleCompareAndSet) \
- V(VarHandleGet) \
- V(VarHandleGetAcquire) \
V(VarHandleGetAndAdd) \
V(VarHandleGetAndAddAcquire) \
V(VarHandleGetAndAddRelease) \
@@ -206,12 +204,6 @@ static constexpr size_t kRuntimeParameterFpuRegistersLength =
V(VarHandleGetAndSet) \
V(VarHandleGetAndSetAcquire) \
V(VarHandleGetAndSetRelease) \
- V(VarHandleGetOpaque) \
- V(VarHandleGetVolatile) \
- V(VarHandleSet) \
- V(VarHandleSetOpaque) \
- V(VarHandleSetRelease) \
- V(VarHandleSetVolatile) \
V(VarHandleWeakCompareAndSet) \
V(VarHandleWeakCompareAndSetAcquire) \
V(VarHandleWeakCompareAndSetPlain) \
@@ -406,6 +398,30 @@ class InstructionCodeGeneratorRISCV64 : public InstructionCodeGenerator {
void ShNAdd(XRegister rd, XRegister rs1, XRegister rs2, DataType::Type type);
+ // Generate a GC root reference load:
+ //
+ // root <- *(obj + offset)
+ //
+ // while honoring read barriers (if any).
+ void GenerateGcRootFieldLoad(HInstruction* instruction,
+ Location root,
+ XRegister obj,
+ uint32_t offset,
+ ReadBarrierOption read_barrier_option,
+ Riscv64Label* label_low = nullptr);
+
+ void Load(Location out, XRegister rs1, int32_t offset, DataType::Type type);
+ void Store(Location value, XRegister rs1, int32_t offset, DataType::Type type);
+
+ // Sequentially consistent store. Used for volatile fields and intrinsics.
+ // The `instruction` argument is for recording an implicit null check stack map with the
+ // store instruction which may not be the last instruction emitted by `StoreSeqCst()`.
+ void StoreSeqCst(Location value,
+ XRegister rs1,
+ int32_t offset,
+ DataType::Type type,
+ HInstruction* instruction = nullptr);
+
protected:
void GenerateClassInitializationCheck(SlowPathCodeRISCV64* slow_path, XRegister class_reg);
void GenerateBitstringTypeCheckCompare(HTypeCheckInstruction* check, XRegister temp);
@@ -450,18 +466,6 @@ class InstructionCodeGeneratorRISCV64 : public InstructionCodeGenerator {
Location maybe_temp,
ReadBarrierOption read_barrier_option);
- // Generate a GC root reference load:
- //
- // root <- *(obj + offset)
- //
- // while honoring read barriers (if any).
- void GenerateGcRootFieldLoad(HInstruction* instruction,
- Location root,
- XRegister obj,
- uint32_t offset,
- ReadBarrierOption read_barrier_option,
- Riscv64Label* label_low = nullptr);
-
void GenerateTestAndBranch(HInstruction* instruction,
size_t condition_input_index,
Riscv64Label* true_target,
@@ -528,9 +532,6 @@ class InstructionCodeGeneratorRISCV64 : public InstructionCodeGenerator {
void FMvX(XRegister rd, FRegister rs1, DataType::Type type);
void FClass(XRegister rd, FRegister rs1, DataType::Type type);
- void Load(Location out, XRegister rs1, int32_t offset, DataType::Type type);
- void Store(Location value, XRegister rs1, int32_t offset, DataType::Type type);
-
Riscv64Assembler* const assembler_;
CodeGeneratorRISCV64* const codegen_;
@@ -597,7 +598,10 @@ class CodeGeneratorRISCV64 : public CodeGenerator {
const Riscv64Assembler& GetAssembler() const override { return assembler_; }
HGraphVisitor* GetLocationBuilder() override { return &location_builder_; }
- HGraphVisitor* GetInstructionVisitor() override { return &instruction_visitor_; }
+
+ InstructionCodeGeneratorRISCV64* GetInstructionVisitor() override {
+ return &instruction_visitor_;
+ }
void MaybeGenerateInlineCacheCheck(HInstruction* instruction, XRegister klass);
@@ -736,6 +740,10 @@ class CodeGeneratorRISCV64 : public CodeGenerator {
Handle<mirror::Class> handle);
void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override;
+ void LoadTypeForBootImageIntrinsic(XRegister dest, TypeReference target_type);
+ void LoadBootImageRelRoEntry(XRegister dest, uint32_t boot_image_offset);
+ void LoadClassRootForIntrinsic(XRegister dest, ClassRoot class_root);
+
void LoadMethod(MethodLoadKind load_kind, Location temp, HInvoke* invoke);
void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
Location temp,
@@ -769,8 +777,8 @@ class CodeGeneratorRISCV64 : public CodeGenerator {
Location index,
Location temp,
bool needs_null_check);
- // Factored implementation, used by GenerateFieldLoadWithBakerReadBarrier
- // and GenerateArrayLoadWithBakerReadBarrier.
+ // Factored implementation, used by GenerateFieldLoadWithBakerReadBarrier,
+ // GenerateArrayLoadWithBakerReadBarrier and intrinsics.
void GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
Location ref,
XRegister obj,
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 5296ed071e..a727e6da9a 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -5768,7 +5768,7 @@ void CodeGeneratorX86::LoadBootImageAddress(Register reg,
void CodeGeneratorX86::LoadIntrinsicDeclaringClass(Register reg, HInvokeStaticOrDirect* invoke) {
DCHECK_NE(invoke->GetIntrinsic(), Intrinsics::kNone);
if (GetCompilerOptions().IsBootImage()) {
- // Load the class the same way as for HLoadClass::LoadKind::kBootImageLinkTimePcRelative.
+ // Load the type the same way as for HLoadClass::LoadKind::kBootImageLinkTimePcRelative.
HX86ComputeBaseMethodAddress* method_address =
invoke->InputAt(invoke->GetSpecialInputIndex())->AsX86ComputeBaseMethodAddress();
DCHECK(method_address != nullptr);
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 8f8690ddf6..58d739397c 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1338,7 +1338,7 @@ void CodeGeneratorX86_64::LoadBootImageAddress(CpuRegister reg, uint32_t boot_im
void CodeGeneratorX86_64::LoadIntrinsicDeclaringClass(CpuRegister reg, HInvoke* invoke) {
DCHECK_NE(invoke->GetIntrinsic(), Intrinsics::kNone);
if (GetCompilerOptions().IsBootImage()) {
- // Load the class the same way as for HLoadClass::LoadKind::kBootImageLinkTimePcRelative.
+ // Load the type the same way as for HLoadClass::LoadKind::kBootImageLinkTimePcRelative.
__ leal(reg,
Address::Absolute(CodeGeneratorX86_64::kPlaceholder32BitOffset, /* no_rip= */ false));
MethodReference target_method = invoke->GetResolvedMethodReference();
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 2ec2134fb1..f5c6340347 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -4695,21 +4695,21 @@ static void GenerateVarHandleTarget(HInvoke* invoke,
__ Mov(target.offset, target_field->GetOffset().Uint32Value());
} else {
// For static fields, we need to fill the `target.object` with the declaring class,
- // so we can use `target.object` as temporary for the `ArtMethod*`. For instance fields,
- // we do not need the declaring class, so we can forget the `ArtMethod*` when
- // we load the `target.offset`, so use the `target.offset` to hold the `ArtMethod*`.
- Register method = (expected_coordinates_count == 0) ? target.object : target.offset;
+ // so we can use `target.object` as temporary for the `ArtField*`. For instance fields,
+ // we do not need the declaring class, so we can forget the `ArtField*` when
+ // we load the `target.offset`, so use the `target.offset` to hold the `ArtField*`.
+ Register field = (expected_coordinates_count == 0) ? target.object : target.offset;
const MemberOffset art_field_offset = mirror::FieldVarHandle::ArtFieldOffset();
const MemberOffset offset_offset = ArtField::OffsetOffset();
- // Load the ArtField, the offset and, if needed, declaring class.
- __ Ldr(method.X(), HeapOperand(varhandle, art_field_offset.Int32Value()));
- __ Ldr(target.offset, MemOperand(method.X(), offset_offset.Int32Value()));
+ // Load the ArtField*, the offset and, if needed, declaring class.
+ __ Ldr(field.X(), HeapOperand(varhandle, art_field_offset.Int32Value()));
+ __ Ldr(target.offset, MemOperand(field.X(), offset_offset.Int32Value()));
if (expected_coordinates_count == 0u) {
codegen->GenerateGcRootFieldLoad(invoke,
LocationFrom(target.object),
- method.X(),
+ field.X(),
ArtField::DeclaringClassOffset().Int32Value(),
/*fixup_label=*/nullptr,
GetCompilerReadBarrierOption());
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index d31593cf9f..a63b32ab9f 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -4335,21 +4335,21 @@ static void GenerateVarHandleTarget(HInvoke* invoke,
__ Mov(target.offset, target_field->GetOffset().Uint32Value());
} else {
// For static fields, we need to fill the `target.object` with the declaring class,
- // so we can use `target.object` as temporary for the `ArtMethod*`. For instance fields,
- // we do not need the declaring class, so we can forget the `ArtMethod*` when
- // we load the `target.offset`, so use the `target.offset` to hold the `ArtMethod*`.
- vixl32::Register method = (expected_coordinates_count == 0) ? target.object : target.offset;
+ // so we can use `target.object` as temporary for the `ArtField*`. For instance fields,
+ // we do not need the declaring class, so we can forget the `ArtField*` when
+ // we load the `target.offset`, so use the `target.offset` to hold the `ArtField*`.
+ vixl32::Register field = (expected_coordinates_count == 0) ? target.object : target.offset;
const MemberOffset art_field_offset = mirror::FieldVarHandle::ArtFieldOffset();
const MemberOffset offset_offset = ArtField::OffsetOffset();
- // Load the ArtField, the offset and, if needed, declaring class.
- __ Ldr(method, MemOperand(varhandle, art_field_offset.Int32Value()));
- __ Ldr(target.offset, MemOperand(method, offset_offset.Int32Value()));
+ // Load the ArtField*, the offset and, if needed, declaring class.
+ __ Ldr(field, MemOperand(varhandle, art_field_offset.Int32Value()));
+ __ Ldr(target.offset, MemOperand(field, offset_offset.Int32Value()));
if (expected_coordinates_count == 0u) {
codegen->GenerateGcRootFieldLoad(invoke,
LocationFrom(target.object),
- method,
+ field,
ArtField::DeclaringClassOffset().Int32Value(),
GetCompilerReadBarrierOption());
}
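// A minimal standalone sketch (hypothetical stand-in types, not ART's real classes) of what the
// renamed code above computes for a field VarHandle: the register formerly called `method` holds
// an ArtField*, from which the generated code reads the field offset and, for static fields only,
// the declaring class that then serves as the object coordinate.
#include <cstdint>

struct ArtFieldLike { void* declaring_class; uint32_t offset; };   // stand-in for ArtField
struct FieldVarHandleLike { ArtFieldLike* art_field; };            // stand-in for FieldVarHandle

void ResolveFieldTarget(const FieldVarHandleLike* varhandle,
                        bool is_static_field,
                        void** target_object,
                        uint32_t* target_offset) {
  ArtFieldLike* field = varhandle->art_field;  // Ldr field, [varhandle, #ArtFieldOffset]
  *target_offset = field->offset;              // Ldr target.offset, [field, #OffsetOffset]
  if (is_static_field) {
    *target_object = field->declaring_class;   // GC-root load of the declaring class
  }
}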
diff --git a/compiler/optimizing/intrinsics_riscv64.cc b/compiler/optimizing/intrinsics_riscv64.cc
index ba541b36f3..ba5a3cd908 100644
--- a/compiler/optimizing/intrinsics_riscv64.cc
+++ b/compiler/optimizing/intrinsics_riscv64.cc
@@ -219,10 +219,52 @@ void IntrinsicCodeGeneratorRISCV64::VisitMemoryPokeShortNative(HInvoke* invoke)
EmitMemoryPoke(invoke, [&](XRegister rs2, XRegister rs1) { __ Sh(rs2, rs1, 0); });
}
-template <typename EmitOp>
-void EmitIntegralUnOp(HInvoke* invoke, EmitOp&& emit_op) {
+static void GenerateReverseBytes(Riscv64Assembler* assembler,
+ Location rd,
+ XRegister rs1,
+ DataType::Type type) {
+ switch (type) {
+ case DataType::Type::kUint16:
+ // There is no 16-bit reverse bytes instruction.
+ __ Rev8(rd.AsRegister<XRegister>(), rs1);
+ __ Srli(rd.AsRegister<XRegister>(), rd.AsRegister<XRegister>(), 48);
+ break;
+ case DataType::Type::kInt16:
+ // There is no 16-bit reverse bytes instruction.
+ __ Rev8(rd.AsRegister<XRegister>(), rs1);
+ __ Srai(rd.AsRegister<XRegister>(), rd.AsRegister<XRegister>(), 48);
+ break;
+ case DataType::Type::kInt32:
+ // There is no 32-bit reverse bytes instruction.
+ __ Rev8(rd.AsRegister<XRegister>(), rs1);
+ __ Srai(rd.AsRegister<XRegister>(), rd.AsRegister<XRegister>(), 32);
+ break;
+ case DataType::Type::kInt64:
+ __ Rev8(rd.AsRegister<XRegister>(), rs1);
+ break;
+ case DataType::Type::kFloat32:
+ // There is no 32-bit reverse bytes instruction.
+ __ Rev8(rs1, rs1); // Note: Clobbers `rs1`.
+ __ Srai(rs1, rs1, 32);
+ __ FMvWX(rd.AsFpuRegister<FRegister>(), rs1);
+ break;
+ case DataType::Type::kFloat64:
+ __ Rev8(rs1, rs1); // Note: Clobbers `rs1`.
+ __ FMvDX(rd.AsFpuRegister<FRegister>(), rs1);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected type: " << type;
+ UNREACHABLE();
+ }
+}
+
+static void GenerateReverseBytes(Riscv64Assembler* assembler,
+ HInvoke* invoke,
+ DataType::Type type) {
+ DCHECK_EQ(type, invoke->GetType());
LocationSummary* locations = invoke->GetLocations();
- emit_op(locations->Out().AsRegister<XRegister>(), locations->InAt(0).AsRegister<XRegister>());
+ GenerateReverseBytes(
+ assembler, locations->Out(), locations->InAt(0).AsRegister<XRegister>(), type);
}
void IntrinsicLocationsBuilderRISCV64::VisitIntegerReverseBytes(HInvoke* invoke) {
@@ -230,12 +272,7 @@ void IntrinsicLocationsBuilderRISCV64::VisitIntegerReverseBytes(HInvoke* invoke)
}
void IntrinsicCodeGeneratorRISCV64::VisitIntegerReverseBytes(HInvoke* invoke) {
- Riscv64Assembler* assembler = GetAssembler();
- EmitIntegralUnOp(invoke, [&](XRegister rd, XRegister rs1) {
- // There is no 32-bit reverse bytes instruction.
- __ Rev8(rd, rs1);
- __ Srai(rd, rd, 32);
- });
+ GenerateReverseBytes(GetAssembler(), invoke, DataType::Type::kInt32);
}
void IntrinsicLocationsBuilderRISCV64::VisitLongReverseBytes(HInvoke* invoke) {
@@ -243,8 +280,7 @@ void IntrinsicLocationsBuilderRISCV64::VisitLongReverseBytes(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorRISCV64::VisitLongReverseBytes(HInvoke* invoke) {
- Riscv64Assembler* assembler = GetAssembler();
- EmitIntegralUnOp(invoke, [&](XRegister rd, XRegister rs1) { __ Rev8(rd, rs1); });
+ GenerateReverseBytes(GetAssembler(), invoke, DataType::Type::kInt64);
}
void IntrinsicLocationsBuilderRISCV64::VisitShortReverseBytes(HInvoke* invoke) {
@@ -252,12 +288,13 @@ void IntrinsicLocationsBuilderRISCV64::VisitShortReverseBytes(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorRISCV64::VisitShortReverseBytes(HInvoke* invoke) {
- Riscv64Assembler* assembler = GetAssembler();
- EmitIntegralUnOp(invoke, [&](XRegister rd, XRegister rs1) {
- // There is no 16-bit reverse bytes instruction.
- __ Rev8(rd, rs1);
- __ Srai(rd, rd, 48);
- });
+ GenerateReverseBytes(GetAssembler(), invoke, DataType::Type::kInt16);
+}
+
+template <typename EmitOp>
+void EmitIntegralUnOp(HInvoke* invoke, EmitOp&& emit_op) {
+ LocationSummary* locations = invoke->GetLocations();
+ emit_op(locations->Out().AsRegister<XRegister>(), locations->InAt(0).AsRegister<XRegister>());
}
void IntrinsicLocationsBuilderRISCV64::VisitIntegerBitCount(HInvoke* invoke) {
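// A minimal standalone sketch (illustrative only, not part of the patch) of the byte-swap trick
// used by GenerateReverseBytes() above: there is no 16- or 32-bit reverse-bytes instruction, so
// the code swaps all eight bytes with Rev8 and shifts the result back down (arithmetic shift for
// the signed types, logical shift for kUint16). The same idea via portable C++/Clang builtins:
#include <cstdint>

int16_t ReverseBytes16(int16_t value) {
  // Rev8 + Srai 48: the original low two bytes become the top two bytes of the swapped word.
  uint64_t swapped = __builtin_bswap64(static_cast<uint64_t>(static_cast<int64_t>(value)));
  return static_cast<int16_t>(static_cast<int64_t>(swapped) >> 48);
}

int32_t ReverseBytes32(int32_t value) {
  // Rev8 + Srai 32.
  uint64_t swapped = __builtin_bswap64(static_cast<uint64_t>(static_cast<int64_t>(value)));
  return static_cast<int32_t>(static_cast<int64_t>(swapped) >> 32);
}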
@@ -456,6 +493,812 @@ void IntrinsicCodeGeneratorRISCV64::VisitStringIndexOfAfter(HInvoke* invoke) {
GenerateVisitStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ false);
}
+enum class GetAndUpdateOp {
+ kSet,
+ kAdd,
+ kAddWithByteSwap,
+ kAnd,
+ kOr,
+ kXor
+};
+
+class VarHandleSlowPathRISCV64 : public IntrinsicSlowPathRISCV64 {
+ public:
+ VarHandleSlowPathRISCV64(HInvoke* invoke, std::memory_order order)
+ : IntrinsicSlowPathRISCV64(invoke),
+ order_(order),
+ return_success_(false),
+ strong_(false),
+ get_and_update_op_(GetAndUpdateOp::kAdd) {
+ }
+
+ Riscv64Label* GetByteArrayViewCheckLabel() {
+ return &byte_array_view_check_label_;
+ }
+
+ Riscv64Label* GetNativeByteOrderLabel() {
+ return &native_byte_order_label_;
+ }
+
+ void SetCompareAndSetOrExchangeArgs(bool return_success, bool strong) {
+ if (return_success) {
+ DCHECK(GetAccessModeTemplate() == mirror::VarHandle::AccessModeTemplate::kCompareAndSet);
+ } else {
+ DCHECK(GetAccessModeTemplate() == mirror::VarHandle::AccessModeTemplate::kCompareAndExchange);
+ }
+ return_success_ = return_success;
+ strong_ = strong;
+ }
+
+ void SetGetAndUpdateOp(GetAndUpdateOp get_and_update_op) {
+ DCHECK(GetAccessModeTemplate() == mirror::VarHandle::AccessModeTemplate::kGetAndUpdate);
+ get_and_update_op_ = get_and_update_op;
+ }
+
+ void EmitNativeCode(CodeGenerator* codegen_in) override {
+ if (GetByteArrayViewCheckLabel()->IsLinked()) {
+ EmitByteArrayViewCode(codegen_in);
+ }
+ IntrinsicSlowPathRISCV64::EmitNativeCode(codegen_in);
+ }
+
+ private:
+ HInvoke* GetInvoke() const {
+ return GetInstruction()->AsInvoke();
+ }
+
+ mirror::VarHandle::AccessModeTemplate GetAccessModeTemplate() const {
+ return mirror::VarHandle::GetAccessModeTemplateByIntrinsic(GetInvoke()->GetIntrinsic());
+ }
+
+ void EmitByteArrayViewCode(CodeGenerator* codegen_in);
+
+ Riscv64Label byte_array_view_check_label_;
+ Riscv64Label native_byte_order_label_;
+ // Shared parameter for all VarHandle intrinsics.
+ std::memory_order order_;
+ // Extra arguments for GenerateVarHandleCompareAndSetOrExchange().
+ bool return_success_;
+ bool strong_;
+ // Extra argument for GenerateVarHandleGetAndUpdate().
+ GetAndUpdateOp get_and_update_op_;
+};
+
+// Generate subtype check without read barriers.
+static void GenerateSubTypeObjectCheckNoReadBarrier(CodeGeneratorRISCV64* codegen,
+ SlowPathCodeRISCV64* slow_path,
+ XRegister object,
+ XRegister type,
+ bool object_can_be_null = true) {
+ Riscv64Assembler* assembler = codegen->GetAssembler();
+
+ const MemberOffset class_offset = mirror::Object::ClassOffset();
+ const MemberOffset super_class_offset = mirror::Class::SuperClassOffset();
+
+ Riscv64Label success;
+ if (object_can_be_null) {
+ __ Beqz(object, &success);
+ }
+
+ ScratchRegisterScope srs(assembler);
+ XRegister temp = srs.AllocateXRegister();
+
+ // Note: The `type` can be `TMP`. Taken branches to `success` and `loop` should be near and never
+ // expand. Only the branch to `slow_path` can theoretically expand and clobber `TMP` when taken.
+ // (`TMP` is clobbered only if the target distance is at least 1MiB.)
+ // FIXME(riscv64): Use "bare" branches. (And add some assembler tests for them.)
+ __ Loadwu(temp, object, class_offset.Int32Value());
+ codegen->MaybeUnpoisonHeapReference(temp);
+ Riscv64Label loop;
+ __ Bind(&loop);
+ __ Beq(type, temp, &success);
+ // We may not have another scratch register for `Loadwu()`. Use `Lwu()` directly.
+ DCHECK(IsInt<12>(super_class_offset.Int32Value()));
+ __ Lwu(temp, temp, super_class_offset.Int32Value());
+ codegen->MaybeUnpoisonHeapReference(temp);
+ __ Beqz(temp, slow_path->GetEntryLabel());
+ __ J(&loop);
+ __ Bind(&success);
+}
+
+// Check access mode and the primitive type from VarHandle.varType.
+// Check reference arguments against the VarHandle.varType; for references this is a subclass
+// check without read barrier, so it can have false negatives which we handle in the slow path.
+static void GenerateVarHandleAccessModeAndVarTypeChecks(HInvoke* invoke,
+ CodeGeneratorRISCV64* codegen,
+ SlowPathCodeRISCV64* slow_path,
+ DataType::Type type) {
+ mirror::VarHandle::AccessMode access_mode =
+ mirror::VarHandle::GetAccessModeByIntrinsic(invoke->GetIntrinsic());
+ Primitive::Type primitive_type = DataTypeToPrimitive(type);
+
+ Riscv64Assembler* assembler = codegen->GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+ XRegister varhandle = locations->InAt(0).AsRegister<XRegister>();
+
+ const MemberOffset var_type_offset = mirror::VarHandle::VarTypeOffset();
+ const MemberOffset access_mode_bit_mask_offset = mirror::VarHandle::AccessModesBitMaskOffset();
+ const MemberOffset primitive_type_offset = mirror::Class::PrimitiveTypeOffset();
+
+ ScratchRegisterScope srs(assembler);
+ XRegister temp = srs.AllocateXRegister();
+ XRegister temp2 = srs.AllocateXRegister();
+
+ // Check that the operation is permitted.
+ __ Loadw(temp, varhandle, access_mode_bit_mask_offset.Int32Value());
+ DCHECK_LT(enum_cast<uint32_t>(access_mode), 31u); // We cannot avoid the shift below.
+ __ Slliw(temp, temp, 31 - enum_cast<uint32_t>(access_mode)); // Shift tested bit to sign bit.
+ __ Bgez(temp, slow_path->GetEntryLabel()); // If not permitted, go to slow path.
+
+ // For primitive types, we do not need a read barrier when loading a reference only for loading
+ // constant field through the reference. For reference types, we deliberately avoid the read
+ // barrier, letting the slow path handle the false negatives.
+ __ Loadw(temp, varhandle, var_type_offset.Int32Value());
+ codegen->MaybeUnpoisonHeapReference(temp);
+
+ // Check the varType.primitiveType field against the type we're trying to use.
+ __ Loadhu(temp2, temp, primitive_type_offset.Int32Value());
+ if (primitive_type == Primitive::kPrimNot) {
+ static_assert(Primitive::kPrimNot == 0);
+ __ Bnez(temp2, slow_path->GetEntryLabel());
+ } else {
+ __ Li(temp, enum_cast<int32_t>(primitive_type)); // `temp` can be clobbered.
+ __ Bne(temp2, temp, slow_path->GetEntryLabel());
+ }
+
+ srs.FreeXRegister(temp2);
+
+ if (type == DataType::Type::kReference) {
+ // Check reference arguments against the varType.
+ // False negatives due to varType being an interface or array type
+ // or due to the missing read barrier are handled by the slow path.
+ size_t expected_coordinates_count = GetExpectedVarHandleCoordinatesCount(invoke);
+ uint32_t arguments_start = /* VarHandle object */ 1u + expected_coordinates_count;
+ uint32_t number_of_arguments = invoke->GetNumberOfArguments();
+ for (size_t arg_index = arguments_start; arg_index != number_of_arguments; ++arg_index) {
+ HInstruction* arg = invoke->InputAt(arg_index);
+ DCHECK_EQ(arg->GetType(), DataType::Type::kReference);
+ if (!arg->IsNullConstant()) {
+ XRegister arg_reg = locations->InAt(arg_index).AsRegister<XRegister>();
+ GenerateSubTypeObjectCheckNoReadBarrier(codegen, slow_path, arg_reg, temp);
+ }
+ }
+ }
+}
+
+static void GenerateVarHandleStaticFieldCheck(HInvoke* invoke,
+ CodeGeneratorRISCV64* codegen,
+ SlowPathCodeRISCV64* slow_path) {
+ Riscv64Assembler* assembler = codegen->GetAssembler();
+ XRegister varhandle = invoke->GetLocations()->InAt(0).AsRegister<XRegister>();
+
+ const MemberOffset coordinate_type0_offset = mirror::VarHandle::CoordinateType0Offset();
+
+ ScratchRegisterScope srs(assembler);
+ XRegister temp = srs.AllocateXRegister();
+
+ // Check that the VarHandle references a static field by checking that coordinateType0 == null.
+ // Do not emit read barrier (or unpoison the reference) for comparing to null.
+ __ Loadwu(temp, varhandle, coordinate_type0_offset.Int32Value());
+ __ Bnez(temp, slow_path->GetEntryLabel());
+}
+
+static void GenerateVarHandleInstanceFieldChecks(HInvoke* invoke,
+ CodeGeneratorRISCV64* codegen,
+ SlowPathCodeRISCV64* slow_path) {
+ VarHandleOptimizations optimizations(invoke);
+ Riscv64Assembler* assembler = codegen->GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+ XRegister varhandle = locations->InAt(0).AsRegister<XRegister>();
+ XRegister object = locations->InAt(1).AsRegister<XRegister>();
+
+ const MemberOffset coordinate_type0_offset = mirror::VarHandle::CoordinateType0Offset();
+ const MemberOffset coordinate_type1_offset = mirror::VarHandle::CoordinateType1Offset();
+
+ // Null-check the object.
+ if (!optimizations.GetSkipObjectNullCheck()) {
+ __ Beqz(object, slow_path->GetEntryLabel());
+ }
+
+ if (!optimizations.GetUseKnownBootImageVarHandle()) {
+ ScratchRegisterScope srs(assembler);
+ XRegister temp = srs.AllocateXRegister();
+
+ // Check that the VarHandle references an instance field by checking that
+ // coordinateType1 == null. coordinateType0 should not be null, but this is handled by the
+ // type compatibility check with the source object's type, which will fail for null.
+ __ Loadwu(temp, varhandle, coordinate_type1_offset.Int32Value());
+ // No need for read barrier or unpoisoning of coordinateType1 for comparison with null.
+ __ Bnez(temp, slow_path->GetEntryLabel());
+
+ // Check that the object has the correct type.
+ // We deliberately avoid the read barrier, letting the slow path handle the false negatives.
+ __ Loadwu(temp, varhandle, coordinate_type0_offset.Int32Value());
+ codegen->MaybeUnpoisonHeapReference(temp);
+ GenerateSubTypeObjectCheckNoReadBarrier(
+ codegen, slow_path, object, temp, /*object_can_be_null=*/ false);
+ }
+}
+
+static void GenerateVarHandleArrayChecks(HInvoke* invoke,
+ CodeGeneratorRISCV64* codegen,
+ VarHandleSlowPathRISCV64* slow_path) {
+ VarHandleOptimizations optimizations(invoke);
+ Riscv64Assembler* assembler = codegen->GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+ XRegister varhandle = locations->InAt(0).AsRegister<XRegister>();
+ XRegister object = locations->InAt(1).AsRegister<XRegister>();
+ XRegister index = locations->InAt(2).AsRegister<XRegister>();
+ DataType::Type value_type =
+ GetVarHandleExpectedValueType(invoke, /*expected_coordinates_count=*/ 2u);
+ Primitive::Type primitive_type = DataTypeToPrimitive(value_type);
+
+ const MemberOffset coordinate_type0_offset = mirror::VarHandle::CoordinateType0Offset();
+ const MemberOffset coordinate_type1_offset = mirror::VarHandle::CoordinateType1Offset();
+ const MemberOffset component_type_offset = mirror::Class::ComponentTypeOffset();
+ const MemberOffset primitive_type_offset = mirror::Class::PrimitiveTypeOffset();
+ const MemberOffset class_offset = mirror::Object::ClassOffset();
+ const MemberOffset array_length_offset = mirror::Array::LengthOffset();
+
+ // Null-check the object.
+ if (!optimizations.GetSkipObjectNullCheck()) {
+ __ Beqz(object, slow_path->GetEntryLabel());
+ }
+
+ ScratchRegisterScope srs(assembler);
+ XRegister temp = srs.AllocateXRegister();
+ XRegister temp2 = srs.AllocateXRegister();
+
+ // Check that the VarHandle references an array, byte array view or ByteBuffer by checking
+ // that coordinateType1 != null. If that's true, coordinateType1 shall be int.class and
+ // coordinateType0 shall not be null but we do not explicitly verify that.
+ __ Loadwu(temp, varhandle, coordinate_type1_offset.Int32Value());
+ // No need for read barrier or unpoisoning of coordinateType1 for comparison with null.
+ __ Beqz(temp, slow_path->GetEntryLabel());
+
+ // Check object class against componentType0.
+ //
+ // This is an exact check and we defer other cases to the runtime. This includes
+ // conversion to array of superclass references, which is valid but subsequently
+ // requires all update operations to check that the value can indeed be stored.
+ // We do not want to perform such extra checks in the intrinsified code.
+ //
+ // We do this check without read barrier, so there can be false negatives which we
+ // defer to the slow path. There shall be no false negatives for array classes in the
+ // boot image (including Object[] and primitive arrays) because they are non-movable.
+ __ Loadwu(temp, varhandle, coordinate_type0_offset.Int32Value());
+ __ Loadwu(temp2, object, class_offset.Int32Value());
+ __ Bne(temp, temp2, slow_path->GetEntryLabel());
+
+ // Check that the coordinateType0 is an array type. We do not need a read barrier
+ // for loading constant reference fields (or chains of them) for comparison with null,
+ // nor for finally loading a constant primitive field (primitive type) below.
+ codegen->MaybeUnpoisonHeapReference(temp);
+ __ Loadwu(temp2, temp, component_type_offset.Int32Value());
+ codegen->MaybeUnpoisonHeapReference(temp2);
+ __ Beqz(temp2, slow_path->GetEntryLabel());
+
+ // Check that the array component type matches the primitive type.
+ __ Loadhu(temp, temp2, primitive_type_offset.Int32Value());
+ if (primitive_type == Primitive::kPrimNot) {
+ static_assert(Primitive::kPrimNot == 0);
+ __ Bnez(temp, slow_path->GetEntryLabel());
+ } else {
+ // With the exception of `kPrimNot` (handled above), `kPrimByte` and `kPrimBoolean`,
+ // we shall check for a byte array view in the slow path.
+ // The check requires the ByteArrayViewVarHandle.class to be in the boot image,
+ // so we cannot emit that if we're JITting without boot image.
+ bool boot_image_available =
+ codegen->GetCompilerOptions().IsBootImage() ||
+ !Runtime::Current()->GetHeap()->GetBootImageSpaces().empty();
+ bool can_be_view = (DataType::Size(value_type) != 1u) && boot_image_available;
+ Riscv64Label* slow_path_label =
+ can_be_view ? slow_path->GetByteArrayViewCheckLabel() : slow_path->GetEntryLabel();
+ __ Li(temp2, enum_cast<int32_t>(primitive_type));
+ __ Bne(temp, temp2, slow_path_label);
+ }
+
+ // Check for array index out of bounds.
+ __ Loadw(temp, object, array_length_offset.Int32Value());
+ __ Bgeu(index, temp, slow_path->GetEntryLabel());
+}
+
+static void GenerateVarHandleCoordinateChecks(HInvoke* invoke,
+ CodeGeneratorRISCV64* codegen,
+ VarHandleSlowPathRISCV64* slow_path) {
+ size_t expected_coordinates_count = GetExpectedVarHandleCoordinatesCount(invoke);
+ if (expected_coordinates_count == 0u) {
+ GenerateVarHandleStaticFieldCheck(invoke, codegen, slow_path);
+ } else if (expected_coordinates_count == 1u) {
+ GenerateVarHandleInstanceFieldChecks(invoke, codegen, slow_path);
+ } else {
+ DCHECK_EQ(expected_coordinates_count, 2u);
+ GenerateVarHandleArrayChecks(invoke, codegen, slow_path);
+ }
+}
+
+static VarHandleSlowPathRISCV64* GenerateVarHandleChecks(HInvoke* invoke,
+ CodeGeneratorRISCV64* codegen,
+ std::memory_order order,
+ DataType::Type type) {
+ size_t expected_coordinates_count = GetExpectedVarHandleCoordinatesCount(invoke);
+ VarHandleOptimizations optimizations(invoke);
+ if (optimizations.GetUseKnownBootImageVarHandle()) {
+ DCHECK_NE(expected_coordinates_count, 2u);
+ if (expected_coordinates_count == 0u || optimizations.GetSkipObjectNullCheck()) {
+ return nullptr;
+ }
+ }
+
+ VarHandleSlowPathRISCV64* slow_path =
+ new (codegen->GetScopedAllocator()) VarHandleSlowPathRISCV64(invoke, order);
+ codegen->AddSlowPath(slow_path);
+
+ if (!optimizations.GetUseKnownBootImageVarHandle()) {
+ GenerateVarHandleAccessModeAndVarTypeChecks(invoke, codegen, slow_path, type);
+ }
+ GenerateVarHandleCoordinateChecks(invoke, codegen, slow_path);
+
+ return slow_path;
+}
+
+struct VarHandleTarget {
+ XRegister object; // The object holding the value to operate on.
+ XRegister offset; // The offset of the value to operate on.
+};
+
+static VarHandleTarget GetVarHandleTarget(HInvoke* invoke) {
+ size_t expected_coordinates_count = GetExpectedVarHandleCoordinatesCount(invoke);
+ LocationSummary* locations = invoke->GetLocations();
+
+ VarHandleTarget target;
+ // The temporary allocated for loading the offset.
+ target.offset = locations->GetTemp(0u).AsRegister<XRegister>();
+ // The reference to the object that holds the value to operate on.
+ target.object = (expected_coordinates_count == 0u)
+ ? locations->GetTemp(1u).AsRegister<XRegister>()
+ : locations->InAt(1).AsRegister<XRegister>();
+ return target;
+}
+
+static void GenerateVarHandleTarget(HInvoke* invoke,
+ const VarHandleTarget& target,
+ CodeGeneratorRISCV64* codegen) {
+ Riscv64Assembler* assembler = codegen->GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+ XRegister varhandle = locations->InAt(0).AsRegister<XRegister>();
+ size_t expected_coordinates_count = GetExpectedVarHandleCoordinatesCount(invoke);
+
+ if (expected_coordinates_count <= 1u) {
+ if (VarHandleOptimizations(invoke).GetUseKnownBootImageVarHandle()) {
+ ScopedObjectAccess soa(Thread::Current());
+ ArtField* target_field = GetBootImageVarHandleField(invoke);
+ if (expected_coordinates_count == 0u) {
+ ObjPtr<mirror::Class> declaring_class = target_field->GetDeclaringClass();
+ if (Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(declaring_class)) {
+ uint32_t boot_image_offset = CodeGenerator::GetBootImageOffset(declaring_class);
+ codegen->LoadBootImageRelRoEntry(target.object, boot_image_offset);
+ } else {
+ codegen->LoadTypeForBootImageIntrinsic(
+ target.object,
+ TypeReference(&declaring_class->GetDexFile(), declaring_class->GetDexTypeIndex()));
+ }
+ }
+ __ Li(target.offset, target_field->GetOffset().Uint32Value());
+ } else {
+ // For static fields, we need to fill the `target.object` with the declaring class,
+ // so we can use `target.object` as temporary for the `ArtField*`. For instance fields,
+ // we do not need the declaring class, so we can forget the `ArtField*` when
+ // we load the `target.offset`, so use the `target.offset` to hold the `ArtField*`.
+ XRegister field = (expected_coordinates_count == 0) ? target.object : target.offset;
+
+ const MemberOffset art_field_offset = mirror::FieldVarHandle::ArtFieldOffset();
+ const MemberOffset offset_offset = ArtField::OffsetOffset();
+
+ // Load the ArtField*, the offset and, if needed, declaring class.
+ __ Loadd(field, varhandle, art_field_offset.Int32Value());
+ __ Loadwu(target.offset, field, offset_offset.Int32Value());
+ if (expected_coordinates_count == 0u) {
+ codegen->GetInstructionVisitor()->GenerateGcRootFieldLoad(
+ invoke,
+ Location::RegisterLocation(target.object),
+ field,
+ ArtField::DeclaringClassOffset().Int32Value(),
+ GetCompilerReadBarrierOption());
+ }
+ }
+ } else {
+ DCHECK_EQ(expected_coordinates_count, 2u);
+ DataType::Type value_type =
+ GetVarHandleExpectedValueType(invoke, /*expected_coordinates_count=*/ 2u);
+ MemberOffset data_offset = mirror::Array::DataOffset(DataType::Size(value_type));
+
+ XRegister index = locations->InAt(2).AsRegister<XRegister>();
+ __ Li(target.offset, data_offset.Int32Value());
+ codegen->GetInstructionVisitor()->ShNAdd(target.offset, index, target.offset, value_type);
+ }
+}
+
+static LocationSummary* CreateVarHandleCommonLocations(HInvoke* invoke) {
+ size_t expected_coordinates_count = GetExpectedVarHandleCoordinatesCount(invoke);
+ DataType::Type return_type = invoke->GetType();
+
+ ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ // Require coordinates in registers. These are the object holding the value
+ // to operate on (except for static fields) and index (for arrays and views).
+ for (size_t i = 0; i != expected_coordinates_count; ++i) {
+ locations->SetInAt(/* VarHandle object */ 1u + i, Location::RequiresRegister());
+ }
+ if (return_type != DataType::Type::kVoid) {
+ if (DataType::IsFloatingPointType(return_type)) {
+ locations->SetOut(Location::RequiresFpuRegister());
+ } else {
+ locations->SetOut(Location::RequiresRegister());
+ }
+ }
+ uint32_t arguments_start = /* VarHandle object */ 1u + expected_coordinates_count;
+ uint32_t number_of_arguments = invoke->GetNumberOfArguments();
+ for (size_t arg_index = arguments_start; arg_index != number_of_arguments; ++arg_index) {
+ HInstruction* arg = invoke->InputAt(arg_index);
+ if (IsZeroBitPattern(arg)) {
+ locations->SetInAt(arg_index, Location::ConstantLocation(arg));
+ } else if (DataType::IsFloatingPointType(arg->GetType())) {
+ locations->SetInAt(arg_index, Location::RequiresFpuRegister());
+ } else {
+ locations->SetInAt(arg_index, Location::RequiresRegister());
+ }
+ }
+
+ // Add a temporary for offset.
+ if ((gUseReadBarrier && !kUseBakerReadBarrier) &&
+ GetExpectedVarHandleCoordinatesCount(invoke) == 0u) { // For static fields.
+ // To preserve the offset value across the non-Baker read barrier slow path
+ // for loading the declaring class, use a fixed callee-save register.
+ constexpr int first_callee_save = CTZ(kRiscv64CalleeSaveRefSpills);
+ locations->AddTemp(Location::RegisterLocation(first_callee_save));
+ } else {
+ locations->AddTemp(Location::RequiresRegister());
+ }
+ if (expected_coordinates_count == 0u) {
+ // Add a temporary to hold the declaring class.
+ locations->AddTemp(Location::RequiresRegister());
+ }
+
+ return locations;
+}
+
+static void CreateVarHandleGetLocations(HInvoke* invoke) {
+ VarHandleOptimizations optimizations(invoke);
+ if (optimizations.GetDoNotIntrinsify()) {
+ return;
+ }
+
+ if ((gUseReadBarrier && !kUseBakerReadBarrier) &&
+ invoke->GetType() == DataType::Type::kReference &&
+ invoke->GetIntrinsic() != Intrinsics::kVarHandleGet &&
+ invoke->GetIntrinsic() != Intrinsics::kVarHandleGetOpaque) {
+ // Unsupported for non-Baker read barrier because the artReadBarrierSlow() ignores
+ // the passed reference and reloads it from the field. This gets the memory visibility
+ // wrong for Acquire/Volatile operations. b/173104084
+ return;
+ }
+
+ CreateVarHandleCommonLocations(invoke);
+}
+
+static void GenerateVarHandleGet(HInvoke* invoke,
+ CodeGeneratorRISCV64* codegen,
+ std::memory_order order,
+ bool byte_swap = false) {
+ DataType::Type type = invoke->GetType();
+ DCHECK_NE(type, DataType::Type::kVoid);
+
+ LocationSummary* locations = invoke->GetLocations();
+ Riscv64Assembler* assembler = codegen->GetAssembler();
+ Location out = locations->Out();
+
+ VarHandleTarget target = GetVarHandleTarget(invoke);
+ VarHandleSlowPathRISCV64* slow_path = nullptr;
+ if (!byte_swap) {
+ slow_path = GenerateVarHandleChecks(invoke, codegen, order, type);
+ GenerateVarHandleTarget(invoke, target, codegen);
+ if (slow_path != nullptr) {
+ __ Bind(slow_path->GetNativeByteOrderLabel());
+ }
+ }
+
+ bool seq_cst_barrier = (order == std::memory_order_seq_cst);
+ bool acquire_barrier = seq_cst_barrier || (order == std::memory_order_acquire);
+ DCHECK(acquire_barrier || order == std::memory_order_relaxed);
+
+ if (seq_cst_barrier) {
+ codegen->GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
+ }
+
+ // Load the value from the target location.
+ if (type == DataType::Type::kReference && gUseReadBarrier && kUseBakerReadBarrier) {
+ // TODO(riscv64): Revisit when we add checking if the holder is black.
+ Location index_and_temp_loc = Location::RegisterLocation(target.offset);
+ codegen->GenerateReferenceLoadWithBakerReadBarrier(invoke,
+ out,
+ target.object,
+ /*offset=*/ 0,
+ index_and_temp_loc,
+ index_and_temp_loc,
+ /*needs_null_check=*/ false);
+ DCHECK(!byte_swap);
+ } else {
+ ScratchRegisterScope srs(assembler);
+ XRegister address = srs.AllocateXRegister();
+ __ Add(address, target.object, target.offset);
+ Location load_loc = out;
+ DataType::Type load_type = type;
+ if (byte_swap && DataType::IsFloatingPointType(type)) {
+ load_loc = Location::RegisterLocation(target.offset); // Load to the offset temporary.
+ load_type = (type == DataType::Type::kFloat32) ? DataType::Type::kInt32
+ : DataType::Type::kInt64;
+ }
+ codegen->GetInstructionVisitor()->Load(load_loc, address, /*offset=*/ 0, load_type);
+ if (type == DataType::Type::kReference) {
+ DCHECK(!byte_swap);
+ Location object_loc = Location::RegisterLocation(target.object);
+ Location offset_loc = Location::RegisterLocation(target.offset);
+ codegen->MaybeGenerateReadBarrierSlow(
+ invoke, out, out, object_loc, /*offset=*/ 0u, /*index=*/ offset_loc);
+ } else if (byte_swap) {
+ GenerateReverseBytes(assembler, out, load_loc.AsRegister<XRegister>(), type);
+ }
+ }
+
+ if (acquire_barrier) {
+ codegen->GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
+ }
+
+ if (slow_path != nullptr) {
+ DCHECK(!byte_swap);
+ __ Bind(slow_path->GetExitLabel());
+ }
+}
+
+void IntrinsicLocationsBuilderRISCV64::VisitVarHandleGet(HInvoke* invoke) {
+ CreateVarHandleGetLocations(invoke);
+}
+
+void IntrinsicCodeGeneratorRISCV64::VisitVarHandleGet(HInvoke* invoke) {
+ GenerateVarHandleGet(invoke, codegen_, std::memory_order_relaxed);
+}
+
+void IntrinsicLocationsBuilderRISCV64::VisitVarHandleGetOpaque(HInvoke* invoke) {
+ CreateVarHandleGetLocations(invoke);
+}
+
+void IntrinsicCodeGeneratorRISCV64::VisitVarHandleGetOpaque(HInvoke* invoke) {
+ GenerateVarHandleGet(invoke, codegen_, std::memory_order_relaxed);
+}
+
+void IntrinsicLocationsBuilderRISCV64::VisitVarHandleGetAcquire(HInvoke* invoke) {
+ CreateVarHandleGetLocations(invoke);
+}
+
+void IntrinsicCodeGeneratorRISCV64::VisitVarHandleGetAcquire(HInvoke* invoke) {
+ GenerateVarHandleGet(invoke, codegen_, std::memory_order_acquire);
+}
+
+void IntrinsicLocationsBuilderRISCV64::VisitVarHandleGetVolatile(HInvoke* invoke) {
+ CreateVarHandleGetLocations(invoke);
+}
+
+void IntrinsicCodeGeneratorRISCV64::VisitVarHandleGetVolatile(HInvoke* invoke) {
+ GenerateVarHandleGet(invoke, codegen_, std::memory_order_seq_cst);
+}
+
+static void CreateVarHandleSetLocations(HInvoke* invoke) {
+ VarHandleOptimizations optimizations(invoke);
+ if (optimizations.GetDoNotIntrinsify()) {
+ return;
+ }
+
+ CreateVarHandleCommonLocations(invoke);
+}
+
+static void GenerateVarHandleSet(HInvoke* invoke,
+ CodeGeneratorRISCV64* codegen,
+ std::memory_order order,
+ bool byte_swap = false) {
+ uint32_t value_index = invoke->GetNumberOfArguments() - 1;
+ DataType::Type value_type = GetDataTypeFromShorty(invoke, value_index);
+
+ Riscv64Assembler* assembler = codegen->GetAssembler();
+ Location value = invoke->GetLocations()->InAt(value_index);
+
+ VarHandleTarget target = GetVarHandleTarget(invoke);
+ VarHandleSlowPathRISCV64* slow_path = nullptr;
+ if (!byte_swap) {
+ slow_path = GenerateVarHandleChecks(invoke, codegen, order, value_type);
+ GenerateVarHandleTarget(invoke, target, codegen);
+ if (slow_path != nullptr) {
+ __ Bind(slow_path->GetNativeByteOrderLabel());
+ }
+ }
+
+ {
+ ScratchRegisterScope srs(assembler);
+ XRegister address = srs.AllocateXRegister();
+ __ Add(address, target.object, target.offset);
+
+ if (byte_swap) {
+ DCHECK(!value.IsConstant()); // Zero uses the main path as it does not need a byte swap.
+ // The offset is no longer needed, so reuse the offset temporary for the byte-swapped value.
+ Location new_value = Location::RegisterLocation(target.offset);
+ if (DataType::IsFloatingPointType(value_type)) {
+ value_type = (value_type == DataType::Type::kFloat32) ? DataType::Type::kInt32
+ : DataType::Type::kInt64;
+ codegen->MoveLocation(new_value, value, value_type);
+ value = new_value;
+ }
+ GenerateReverseBytes(assembler, new_value, value.AsRegister<XRegister>(), value_type);
+ value = new_value;
+ }
+
+ if (order == std::memory_order_seq_cst) {
+ codegen->GetInstructionVisitor()->StoreSeqCst(value, address, /*offset=*/ 0, value_type);
+ } else {
+ if (order == std::memory_order_release) {
+ codegen->GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
+ } else {
+ DCHECK(order == std::memory_order_relaxed);
+ }
+ codegen->GetInstructionVisitor()->Store(value, address, /*offset=*/ 0, value_type);
+ }
+ }
+
+ if (CodeGenerator::StoreNeedsWriteBarrier(value_type, invoke->InputAt(value_index))) {
+ codegen->MarkGCCard(target.object, value.AsRegister<XRegister>(), /* emit_null_check= */ true);
+ }
+
+ if (slow_path != nullptr) {
+ DCHECK(!byte_swap);
+ __ Bind(slow_path->GetExitLabel());
+ }
+}
+
+void IntrinsicLocationsBuilderRISCV64::VisitVarHandleSet(HInvoke* invoke) {
+ CreateVarHandleSetLocations(invoke);
+}
+
+void IntrinsicCodeGeneratorRISCV64::VisitVarHandleSet(HInvoke* invoke) {
+ GenerateVarHandleSet(invoke, codegen_, std::memory_order_relaxed);
+}
+
+void IntrinsicLocationsBuilderRISCV64::VisitVarHandleSetOpaque(HInvoke* invoke) {
+ CreateVarHandleSetLocations(invoke);
+}
+
+void IntrinsicCodeGeneratorRISCV64::VisitVarHandleSetOpaque(HInvoke* invoke) {
+ GenerateVarHandleSet(invoke, codegen_, std::memory_order_relaxed);
+}
+
+void IntrinsicLocationsBuilderRISCV64::VisitVarHandleSetRelease(HInvoke* invoke) {
+ CreateVarHandleSetLocations(invoke);
+}
+
+void IntrinsicCodeGeneratorRISCV64::VisitVarHandleSetRelease(HInvoke* invoke) {
+ GenerateVarHandleSet(invoke, codegen_, std::memory_order_release);
+}
+
+void IntrinsicLocationsBuilderRISCV64::VisitVarHandleSetVolatile(HInvoke* invoke) {
+ CreateVarHandleSetLocations(invoke);
+}
+
+void IntrinsicCodeGeneratorRISCV64::VisitVarHandleSetVolatile(HInvoke* invoke) {
+ GenerateVarHandleSet(invoke, codegen_, std::memory_order_seq_cst);
+}
+
+static void GenerateVarHandleCompareAndSetOrExchange(HInvoke* invoke,
+ CodeGeneratorRISCV64* codegen,
+ std::memory_order order,
+ bool return_success,
+ bool strong,
+ bool byte_swap = false) {
+ UNUSED(invoke, codegen, order, return_success, strong, byte_swap);
+ LOG(FATAL) << "Unimplemented!";
+}
+
+static void GenerateVarHandleGetAndUpdate(HInvoke* invoke,
+ CodeGeneratorRISCV64* codegen,
+ GetAndUpdateOp get_and_update_op,
+ std::memory_order order,
+ bool byte_swap = false) {
+ UNUSED(invoke, codegen, get_and_update_op, order, byte_swap);
+ LOG(FATAL) << "Unimplemented!";
+}
+
+void VarHandleSlowPathRISCV64::EmitByteArrayViewCode(CodeGenerator* codegen_in) {
+ DCHECK(GetByteArrayViewCheckLabel()->IsLinked());
+ CodeGeneratorRISCV64* codegen = down_cast<CodeGeneratorRISCV64*>(codegen_in);
+ Riscv64Assembler* assembler = codegen->GetAssembler();
+ HInvoke* invoke = GetInvoke();
+ mirror::VarHandle::AccessModeTemplate access_mode_template = GetAccessModeTemplate();
+ DataType::Type value_type =
+ GetVarHandleExpectedValueType(invoke, /*expected_coordinates_count=*/ 2u);
+ DCHECK_NE(value_type, DataType::Type::kReference);
+ size_t size = DataType::Size(value_type);
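+  // The byte array view path is only used for value types wider than a byte.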
+ DCHECK_GT(size, 1u);
+ LocationSummary* locations = invoke->GetLocations();
+ XRegister varhandle = locations->InAt(0).AsRegister<XRegister>();
+ XRegister object = locations->InAt(1).AsRegister<XRegister>();
+ XRegister index = locations->InAt(2).AsRegister<XRegister>();
+
+ MemberOffset class_offset = mirror::Object::ClassOffset();
+ MemberOffset array_length_offset = mirror::Array::LengthOffset();
+ MemberOffset data_offset = mirror::Array::DataOffset(Primitive::kPrimByte);
+ MemberOffset native_byte_order_offset = mirror::ByteArrayViewVarHandle::NativeByteOrderOffset();
+
+ __ Bind(GetByteArrayViewCheckLabel());
+
+ VarHandleTarget target = GetVarHandleTarget(invoke);
+ {
+ ScratchRegisterScope srs(assembler);
+ XRegister temp = srs.AllocateXRegister();
+ XRegister temp2 = srs.AllocateXRegister();
+
+    // The main path checked that coordinateType0 is an array class that matches
+    // the class of the actual coordinate argument but whose component type does not
+    // match the value type.
+ // Check if the `varhandle` references a ByteArrayViewVarHandle instance.
+ __ Loadwu(temp, varhandle, class_offset.Int32Value());
+ codegen->MaybeUnpoisonHeapReference(temp);
+ codegen->LoadClassRootForIntrinsic(temp2, ClassRoot::kJavaLangInvokeByteArrayViewVarHandle);
+ __ Bne(temp, temp2, GetEntryLabel());
+
+ // Check for array index out of bounds.
+ __ Loadw(temp, object, array_length_offset.Int32Value());
+ __ Bgeu(index, temp, GetEntryLabel());
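+    // The access covers `size` bytes, so check that the last accessed byte is also in bounds.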
+ __ Addi(temp2, index, size - 1u);
+ __ Bgeu(temp2, temp, GetEntryLabel());
+
+ // Construct the target.
+ __ Addi(target.offset, index, data_offset.Int32Value());
+
+ // Alignment check. For unaligned access, go to the runtime.
+ DCHECK(IsPowerOfTwo(size));
+ __ Andi(temp, target.offset, size - 1u);
+ __ Bnez(temp, GetEntryLabel());
+
+    // Byte order check. For native byte order, return to the main path.
+ if (access_mode_template == mirror::VarHandle::AccessModeTemplate::kSet &&
+ IsZeroBitPattern(invoke->InputAt(invoke->GetNumberOfArguments() - 1u))) {
+ // There is no reason to differentiate between native byte order and byte-swap
+ // for setting a zero bit pattern. Just return to the main path.
+ __ J(GetNativeByteOrderLabel());
+ return;
+ }
+ __ Loadbu(temp, varhandle, native_byte_order_offset.Int32Value());
+ __ Bnez(temp, GetNativeByteOrderLabel());
+ }
+
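+  // The byte order differs from the native byte order; emit the access with a byte swap.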
+ switch (access_mode_template) {
+ case mirror::VarHandle::AccessModeTemplate::kGet:
+ GenerateVarHandleGet(invoke, codegen, order_, /*byte_swap=*/ true);
+ break;
+ case mirror::VarHandle::AccessModeTemplate::kSet:
+ GenerateVarHandleSet(invoke, codegen, order_, /*byte_swap=*/ true);
+ break;
+ case mirror::VarHandle::AccessModeTemplate::kCompareAndSet:
+ case mirror::VarHandle::AccessModeTemplate::kCompareAndExchange:
+ GenerateVarHandleCompareAndSetOrExchange(
+ invoke, codegen, order_, return_success_, strong_, /*byte_swap=*/ true);
+ break;
+ case mirror::VarHandle::AccessModeTemplate::kGetAndUpdate:
+ GenerateVarHandleGetAndUpdate(
+ invoke, codegen, get_and_update_op_, order_, /*byte_swap=*/ true);
+ break;
+ }
+ __ J(GetExitLabel());
+}
+
#define MARK_UNIMPLEMENTED(Name) UNIMPLEMENTED_INTRINSIC(RISCV64, Name)
UNIMPLEMENTED_INTRINSIC_LIST_RISCV64(MARK_UNIMPLEMENTED);
#undef MARK_UNIMPLEMENTED
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 02f312e74e..b269f45b6f 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -3768,7 +3768,7 @@ static Register GenerateVarHandleFieldReference(HInvoke* invoke,
const uint32_t declaring_class_offset = ArtField::DeclaringClassOffset().Uint32Value();
Register varhandle_object = locations->InAt(0).AsRegister<Register>();
- // Load the ArtField and the offset
+ // Load the ArtField* and the offset.
__ movl(temp, Address(varhandle_object, artfield_offset));
__ movl(offset, Address(temp, offset_offset));
size_t expected_coordinates_count = GetExpectedVarHandleCoordinatesCount(invoke);
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 842af6b73f..8b4b05da04 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -3510,7 +3510,7 @@ static void GenerateVarHandleAccessModeAndVarTypeChecks(HInvoke* invoke,
__ movl(temp, Address(varhandle, var_type_offset));
__ MaybeUnpoisonHeapReference(temp);
- // Check check the varType.primitiveType field against the type we're trying to retrieve.
+ // Check the varType.primitiveType field against the type we're trying to use.
__ cmpw(Address(temp, primitive_type_offset), Immediate(static_cast<uint16_t>(primitive_type)));
__ j(kNotEqual, slow_path->GetEntryLabel());
@@ -3754,22 +3754,22 @@ static void GenerateVarHandleTarget(HInvoke* invoke,
__ movl(CpuRegister(target.offset), Immediate(target_field->GetOffset().Uint32Value()));
} else {
// For static fields, we need to fill the `target.object` with the declaring class,
- // so we can use `target.object` as temporary for the `ArtMethod*`. For instance fields,
- // we do not need the declaring class, so we can forget the `ArtMethod*` when
- // we load the `target.offset`, so use the `target.offset` to hold the `ArtMethod*`.
- CpuRegister method((expected_coordinates_count == 0) ? target.object : target.offset);
+ // so we can use `target.object` as temporary for the `ArtField*`. For instance fields,
+ // we do not need the declaring class, so we can forget the `ArtField*` when
+ // we load the `target.offset`, so use the `target.offset` to hold the `ArtField*`.
+ CpuRegister field((expected_coordinates_count == 0) ? target.object : target.offset);
const MemberOffset art_field_offset = mirror::FieldVarHandle::ArtFieldOffset();
const MemberOffset offset_offset = ArtField::OffsetOffset();
- // Load the ArtField, the offset and, if needed, declaring class.
- __ movq(method, Address(varhandle, art_field_offset));
- __ movl(CpuRegister(target.offset), Address(method, offset_offset));
+ // Load the ArtField*, the offset and, if needed, declaring class.
+ __ movq(field, Address(varhandle, art_field_offset));
+ __ movl(CpuRegister(target.offset), Address(field, offset_offset));
if (expected_coordinates_count == 0u) {
InstructionCodeGeneratorX86_64* instr_codegen = codegen->GetInstructionCodegen();
instr_codegen->GenerateGcRootFieldLoad(invoke,
Location::RegisterLocation(target.object),
- Address(method, ArtField::DeclaringClassOffset()),
+ Address(field, ArtField::DeclaringClassOffset()),
/*fixup_label=*/nullptr,
GetCompilerReadBarrierOption());
}