Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/code_generator_x86.cc      2
-rw-r--r--  compiler/optimizing/code_generator_x86.h      14
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc   2
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h   13
-rw-r--r--  compiler/optimizing/intrinsics_x86.cc          2
-rw-r--r--  compiler/optimizing/intrinsics_x86_64.cc       2
6 files changed, 4 insertions, 31 deletions
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index e7f7d57b98..bc3256ec8c 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -4157,7 +4157,7 @@ void CodeGeneratorX86::GenerateMemoryBarrier(MemBarrierKind kind) {
*/
switch (kind) {
case MemBarrierKind::kAnyAny: {
- MemoryFence();
+ __ mfence();
break;
}
case MemBarrierKind::kAnyStore:
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index ebbe486cb5..7c292fa103 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -17,7 +17,6 @@
#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_X86_H_
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_X86_H_

-#include "arch/x86/instruction_set_features_x86.h"
#include "code_generator.h"
#include "dex/compiler_enums.h"
#include "driver/compiler_options.h"
@@ -497,19 +496,6 @@ class CodeGeneratorX86 : public CodeGenerator {
// artReadBarrierForRootSlow.
void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);

- // Ensure that prior stores complete to memory before subsequent loads.
- // The locked add implementation will avoid serializing device memory, but will
- // touch (but not change) the top of the stack.
- // The 'non_temporal' parameter should be used to ensure ordering of non-temporal stores.
- void MemoryFence(bool non_temporal = false) {
- if (!non_temporal && isa_features_.PrefersLockedAddSynchronization()) {
- assembler_.lock()->addl(Address(ESP, 0), Immediate(0));
- } else {
- assembler_.mfence();
- }
- }
-
-
private:
// Factored implementation of GenerateFieldLoadWithBakerReadBarrier
// and GenerateArrayLoadWithBakerReadBarrier.
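
Aside: the helper deleted above implemented a StoreLoad barrier with a locked add of zero to the top of the stack, falling back to mfence only when non-temporal stores had to be ordered. As a minimal standalone sketch of the two strategies (not ART code; assumes GCC/Clang inline assembly on x86-64):

// Sketch only: the two StoreLoad-fence strategies the removed MemoryFence()
// helper chose between. Not ART code; assumes GCC/Clang inline asm, x86-64.
inline void FenceLockedAdd() {
  // Locked read-modify-write of the stack top: reads and writes 0(%rsp)
  // without changing the value, which orders prior stores before later
  // loads and is typically cheaper than MFENCE on many cores.
  asm volatile("lock addl $0, 0(%%rsp)" ::: "memory", "cc");
}

inline void FenceMfence() {
  // Full fence; also orders non-temporal (streaming) stores, which is why
  // the helper fell back to it when 'non_temporal' was set.
  asm volatile("mfence" ::: "memory");
}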
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 02cc88d191..92cef5f226 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -4029,7 +4029,7 @@ void CodeGeneratorX86_64::GenerateMemoryBarrier(MemBarrierKind kind) {
*/
switch (kind) {
case MemBarrierKind::kAnyAny: {
- codegen_->MemoryFence();
+ __ mfence();
break;
}
case MemBarrierKind::kAnyStore:
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index e5a487c761..dda9ea22d9 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -17,7 +17,6 @@
#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_X86_64_H_
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_X86_64_H_

-#include "arch/x86_64/instruction_set_features_x86_64.h"
#include "code_generator.h"
#include "dex/compiler_enums.h"
#include "driver/compiler_options.h"
@@ -480,18 +479,6 @@ class CodeGeneratorX86_64 : public CodeGenerator {
int64_t v,
HInstruction* instruction);

- // Ensure that prior stores complete to memory before subsequent loads.
- // The locked add implementation will avoid serializing device memory, but will
- // touch (but not change) the top of the stack. The locked add should not be used for
- // ordering non-temporal stores.
- void MemoryFence(bool force_mfence = false) {
- if (!force_mfence && isa_features_.PrefersLockedAddSynchronization()) {
- assembler_.lock()->addl(Address(CpuRegister(RSP), 0), Immediate(0));
- } else {
- assembler_.mfence();
- }
- }
-
private:
// Factored implementation of GenerateFieldLoadWithBakerReadBarrier
// and GenerateArrayLoadWithBakerReadBarrier.
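
Aside: the x86-64 helper deleted above is the same idea; its parameter is named force_mfence rather than non_temporal, but both exist because a locked add is not documented to order weakly-ordered (non-temporal) stores. A minimal sketch of the case that still needs mfence (illustrative only, standard SSE2 intrinsics, not ART code):

#include <emmintrin.h>  // _mm_stream_si32, _mm_mfence

// Publish a value written with a non-temporal store: the streaming store
// bypasses normal store ordering, so drain it with MFENCE before raising
// the flag that readers poll.
void PublishNonTemporal(int* slot, int value, volatile int* ready) {
  _mm_stream_si32(slot, value);  // Weakly-ordered streaming store.
  _mm_mfence();                  // Order it before the flag store below.
  *ready = 1;
}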
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 74ade7c420..fd454d8322 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -2005,7 +2005,7 @@ static void GenUnsafePut(LocationSummary* locations,
}

if (is_volatile) {
- codegen->MemoryFence();
+ __ mfence();
}

if (type == Primitive::kPrimNot) {
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 6e54dde0f0..ce737e3f7e 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -2080,7 +2080,7 @@ static void GenUnsafePut(LocationSummary* locations, Primitive::Type type, bool
}

if (is_volatile) {
- codegen->MemoryFence();
+ __ mfence();
}

if (type == Primitive::kPrimNot) {
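
The two intrinsics changes mirror the code-generator change: for a volatile Unsafe put, GenUnsafePut now always emits mfence after the store instead of calling the removed helper. Conceptually (a sketch in portable C++, not the emitted code), the resulting ordering is that of a sequentially consistent store:

#include <atomic>
#include <cstdint>

// Sketch of the ordering a volatile put must provide: the store must be
// followed by a StoreLoad barrier. On x86-64 a seq_cst store compiles to
// a plain store plus MFENCE (or an equivalent locked instruction).
void VolatilePutSketch(std::atomic<int32_t>& field, int32_t value) {
  field.store(value, std::memory_order_seq_cst);
}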