Opt Compiler: ARM64: Add support for rem-float, rem-double and volatile.
Add support for the rem-float and rem-double operations, and for
volatile memory accesses, implemented with acquire/release instructions
and memory barriers.
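On ARMv8 a volatile load can be implemented with a load-acquire (ldar)
and a volatile store with a store-release (stlr), which is the pattern
the LoadAcquire/StoreRelease helpers below are meant to emit. A minimal
standalone C++ sketch of the intended ordering semantics, using
std::atomic rather than the backend's VIXL emitters:

  #include <atomic>

  std::atomic<int> field;

  // An acquire load compiles to ldar on ARM64.
  int VolatileGet() {
    return field.load(std::memory_order_acquire);
  }

  // A release store compiles to stlr on ARM64.
  void VolatilePut(int value) {
    field.store(value, std::memory_order_release);
  }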
Change-Id: I96a24dff66002c3b772c3d8e6ed792e3cb59048a
Signed-off-by: Serban Constantinescu <serban.constantinescu@arm.com>
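Note: ARM64 has no floating-point remainder instruction, so rem-float
and rem-double are expected to lower to runtime calls to fmodf/fmod. A
sketch of that lowering in the instruction visitor, assuming the quick
entry points pFmodf/pFmod and the InvokeRuntime helper declared in the
header below:

  void InstructionCodeGeneratorARM64::VisitRem(HRem* rem) {
    Primitive::Type type = rem->GetResultType();
    switch (type) {
      case Primitive::kPrimFloat:
      case Primitive::kPrimDouble: {
        // No ARM64 instruction computes an FP remainder; call into libm
        // via the runtime (fmodf for float, fmod for double).
        int32_t entry_offset = (type == Primitive::kPrimFloat)
            ? QUICK_ENTRY_POINT(pFmodf)
            : QUICK_ENTRY_POINT(pFmod);
        codegen_->InvokeRuntime(entry_offset, rem, rem->GetDexPc());
        break;
      }
      default:
        LOG(FATAL) << "Unexpected rem type " << type;
    }
  }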
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 1d5bfb7..19488a4 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -18,6 +18,7 @@
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM64_H_

#include "code_generator.h"
+#include "dex/compiler_enums.h"
#include "nodes.h"
#include "parallel_move_resolver.h"
#include "utils/arm64/assembler_arm64.h"
@@ -108,6 +109,7 @@

private:
void GenerateClassInitializationCheck(SlowPathCodeARM64* slow_path, vixl::Register class_reg);
+ void GenerateMemoryBarrier(MemBarrierKind kind);
void GenerateSuspendCheck(HSuspendCheck* instruction, HBasicBlock* successor);
void HandleBinaryOp(HBinaryOperation* instr);
void HandleShift(HBinaryOperation* instr);
@@ -257,6 +259,8 @@
void Load(Primitive::Type type, vixl::CPURegister dst, const vixl::MemOperand& src);
void Store(Primitive::Type type, vixl::CPURegister rt, const vixl::MemOperand& dst);
void LoadCurrentMethod(vixl::Register current_method);
+ void LoadAcquire(Primitive::Type type, vixl::CPURegister dst, const vixl::MemOperand& src);
+ void StoreRelease(Primitive::Type type, vixl::CPURegister rt, const vixl::MemOperand& dst);

// Generate code to invoke a runtime entry point.
void InvokeRuntime(int32_t offset, HInstruction* instruction, uint32_t dex_pc);
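For reference, a sketch of what the GenerateMemoryBarrier helper
declared above might emit, assuming the MemBarrierKind enumerators from
dex/compiler_enums.h and VIXL's Dmb(domain, type) emitter:

  void InstructionCodeGeneratorARM64::GenerateMemoryBarrier(MemBarrierKind kind) {
    // Map the IR barrier kind onto a dmb in the inner-shareable domain.
    vixl::BarrierType type = vixl::BarrierAll;
    switch (kind) {
      case kAnyAny:
      case kAnyStore:
        type = vixl::BarrierAll;     // dmb ish
        break;
      case kLoadAny:
        type = vixl::BarrierReads;   // dmb ishld
        break;
      case kStoreStore:
        type = vixl::BarrierWrites;  // dmb ishst
        break;
      default:
        LOG(FATAL) << "Unexpected memory barrier " << kind;
    }
    __ Dmb(vixl::InnerShareable, type);
  }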