author Treehugger Robot <treehugger-gerrit@google.com> 2017-11-08 03:26:30 +0000
committer Gerrit Code Review <noreply-gerritcodereview@google.com> 2017-11-08 03:26:30 +0000
commit  cefd676fb79d225fcd7e8e8c0ef141d70a2f45b8 (patch)
tree    be625fa0bc1255cbea5c5066dd878fb0b2a1c454 /compiler/optimizing
parent  dbc26ad5e8ded15688d20a39344c677077311279 (diff)
parent  86083f7cd118f3d6c757191e83b4e4abaabdc5d7 (diff)
Merge "runtime: Bitstring implementation for subtype checking (4/4)."
Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc     | 5
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc  | 2
-rw-r--r--  compiler/optimizing/code_generator_mips.cc      | 2
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc    | 2
-rw-r--r--  compiler/optimizing/code_generator_x86.cc       | 2
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc    | 2
6 files changed, 8 insertions, 7 deletions
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index e01b7b78cb..a0cb43ee01 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -2171,9 +2171,10 @@ void InstructionCodeGeneratorARM64::GenerateClassInitializationCheck(SlowPathCod
   // Even if the initialized flag is set, we need to ensure consistent memory ordering.
   // TODO(vixl): Let the MacroAssembler handle MemOperand.
   __ Add(temp, class_reg, status_offset);
-  __ Ldar(temp, HeapOperand(temp));
+  __ Ldarb(temp, HeapOperand(temp));
   __ Cmp(temp, mirror::Class::kStatusInitialized);
-  __ B(lt, slow_path->GetEntryLabel());
+  __ B(ne, slow_path->GetEntryLabel());
+  // Use Bne instead of Blt because ARM64 doesn't have Ldarsb.
   __ Bind(slow_path->GetExitLabel());
 }
 
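The arm64 hunk above is the interesting one: Ldar (a 32-bit load-acquire) becomes Ldarb (a byte load-acquire), and the branch condition changes from lt to ne. As the in-diff comment notes, ARM64 has no sign-extending load-acquire byte instruction (Ldarsb); Ldarb zero-extends, so a negative error status would read back as a large unsigned value and a signed less-than branch would wrongly skip the slow path. Because kStatusInitialized is the highest status a class reaches, branching on "not equal" is an equivalent, safe test. A minimal C++ sketch of that reasoning follows; the enum values are illustrative stand-ins, not the exact mirror::Class constants.

#include <cstdint>
#include <cstdio>

// Illustrative status values; the real constants live in mirror::Class.
enum Status : int8_t {
  kStatusError = -1,        // error statuses are negative
  kStatusNotReady = 0,
  kStatusInitialized = 11,  // terminal (largest) status
};

// What Ldarb yields: the status byte, zero-extended to 32 bits.
uint32_t LoadStatusZeroExtended(int8_t status_byte) {
  return static_cast<uint8_t>(status_byte);
}

int main() {
  uint32_t s = LoadStatusZeroExtended(kStatusError);  // -1 reads back as 255
  // A signed "lt" compare on the zero-extended value misses the error status:
  printf("lt takes slow path: %d\n", static_cast<int32_t>(s) < kStatusInitialized);   // 0 (wrong)
  // "ne" sends every non-initialized status to the slow path:
  printf("ne takes slow path: %d\n", s != static_cast<uint32_t>(kStatusInitialized)); // 1 (right)
  return 0;
}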
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index edd307263d..a8f7e8600a 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -7255,7 +7255,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateClassInitializationCheck(
     LoadClassSlowPathARMVIXL* slow_path, vixl32::Register class_reg) {
   UseScratchRegisterScope temps(GetVIXLAssembler());
   vixl32::Register temp = temps.Acquire();
-  GetAssembler()->LoadFromOffset(kLoadWord,
+  GetAssembler()->LoadFromOffset(kLoadSignedByte,
                                  temp,
                                  class_reg,
                                  mirror::Class::StatusOffset().Int32Value());
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index d2cfa4f187..f9f5a4da56 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -1985,7 +1985,7 @@ void CodeGeneratorMIPS::GenerateInvokeRuntime(int32_t entry_point_offset, bool d
 
 void InstructionCodeGeneratorMIPS::GenerateClassInitializationCheck(SlowPathCodeMIPS* slow_path,
                                                                      Register class_reg) {
-  __ LoadFromOffset(kLoadWord, TMP, class_reg, mirror::Class::StatusOffset().Int32Value());
+  __ LoadFromOffset(kLoadSignedByte, TMP, class_reg, mirror::Class::StatusOffset().Int32Value());
   __ LoadConst32(AT, mirror::Class::kStatusInitialized);
   __ Blt(TMP, AT, slow_path->GetEntryLabel());
   // Even if the initialized flag is set, we need to ensure consistent memory ordering.
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 28ca7cb94a..0a6d9159d1 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -1823,7 +1823,7 @@ void CodeGeneratorMIPS64::GenerateInvokeRuntime(int32_t entry_point_offset) {
 
 void InstructionCodeGeneratorMIPS64::GenerateClassInitializationCheck(SlowPathCodeMIPS64* slow_path,
                                                                        GpuRegister class_reg) {
-  __ LoadFromOffset(kLoadWord, TMP, class_reg, mirror::Class::StatusOffset().Int32Value());
+  __ LoadFromOffset(kLoadSignedByte, TMP, class_reg, mirror::Class::StatusOffset().Int32Value());
   __ LoadConst32(AT, mirror::Class::kStatusInitialized);
   __ Bltc(TMP, AT, slow_path->GetEntryLabel());
   // Even if the initialized flag is set, we need to ensure consistent memory ordering.
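On ARM, MIPS, and MIPS64 only the load width changes: kLoadSignedByte maps to a sign-extending byte load on these ISAs (ldrsb on ARM, lb on MIPS), so a negative error status survives the narrowing as a negative value and the existing signed branches (Blt/Bltc on MIPS, and presumably the unchanged compare-and-branch on ARM) remain correct. A small sketch of why sign extension preserves the original comparison, with an illustrative constant:

#include <cstdint>
#include <cstdio>

// An lb/ldrsb-style load: the status byte, sign-extended to 32 bits.
int32_t LoadStatusSignExtended(int8_t status_byte) {
  return status_byte;  // implicit sign extension
}

int main() {
  const int32_t kStatusInitialized = 11;        // illustrative value
  int32_t status = LoadStatusSignExtended(-1);  // an error status stays -1
  // The signed less-than branch still routes errors to the slow path:
  printf("blt takes slow path: %d\n", status < kStatusInitialized);  // 1 (right)
  return 0;
}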
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index a1500825f8..9b351605a4 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -6212,7 +6212,7 @@ void InstructionCodeGeneratorX86::VisitClinitCheck(HClinitCheck* check) {
 
 void InstructionCodeGeneratorX86::GenerateClassInitializationCheck(
     SlowPathCode* slow_path, Register class_reg) {
-  __ cmpl(Address(class_reg, mirror::Class::StatusOffset().Int32Value()),
+  __ cmpb(Address(class_reg, mirror::Class::StatusOffset().Int32Value()),
           Immediate(mirror::Class::kStatusInitialized));
   __ j(kLess, slow_path->GetEntryLabel());
   __ Bind(slow_path->GetExitLabel());
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index db7e53e74b..8f7961ec6e 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -5403,7 +5403,7 @@ void ParallelMoveResolverX86_64::RestoreScratch(int reg) {
 
 void InstructionCodeGeneratorX86_64::GenerateClassInitializationCheck(
     SlowPathCode* slow_path, CpuRegister class_reg) {
-  __ cmpl(Address(class_reg, mirror::Class::StatusOffset().Int32Value()),
+  __ cmpb(Address(class_reg, mirror::Class::StatusOffset().Int32Value()),
           Immediate(mirror::Class::kStatusInitialized));
   __ j(kLess, slow_path->GetEntryLabel());
   __ Bind(slow_path->GetExitLabel());
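On x86 and x86-64, cmpl becomes cmpb: the compare itself narrows to a single byte at the status field's offset, and j(kLess) emits the signed jl, so negative error statuses still test below kStatusInitialized with no separate load needed. A sketch of the byte-compare semantics; the field layout here (status in the low byte of the 32-bit word, with the remaining bits presumably freed for the subtype-check bitstring named in the merge title) is an assumption for illustration only.

#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for the 32-bit status_ word in mirror::Class,
// assuming the class status occupies the low byte.
struct FakeClass {
  uint32_t status_word;
};

int main() {
  const int8_t kStatusInitialized = 11;  // illustrative value
  FakeClass klass{0x12345600u | static_cast<uint8_t>(int8_t{-1})};  // status byte = -1
  // cmpb reads one byte at the field offset (the low byte on little-endian):
  int8_t status = static_cast<int8_t>(klass.status_word & 0xFF);
  // jl, as emitted by j(kLess), performs a signed comparison:
  printf("jl takes slow path: %d\n", status < kStatusInitialized);  // 1 (right)
  return 0;
}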