author    Treehugger Robot <treehugger-gerrit@google.com>  2018-01-05 17:36:25 +0000
committer Gerrit Code Review <noreply-gerritcodereview@google.com>  2018-01-05 17:36:25 +0000
commit    70a58af9c733bc14c4573dddd282b3c02ccf4985 (patch)
tree      46fe53c06014ec4809f5f4c1dbd1e1c941349619 /compiler/optimizing
parent    9382c0d53f91f3788a5254495917898d8b61fe00 (diff)
parent    2c64a837e62c2839521c89060b5bb0dcb237ddda (diff)
Merge "Change ClassStatus to fit into 4 bits."
Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc     | 5
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc  | 6
-rw-r--r--  compiler/optimizing/code_generator_mips.cc      | 6
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc    | 6
-rw-r--r--  compiler/optimizing/code_generator_x86.cc       | 4
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc    | 4
6 files changed, 15 insertions, 16 deletions
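
This merge packs ClassStatus into 4 unsigned bits, so every backend's class-initialization fast path changes in the same two ways: the status byte is loaded zero-extended (unsigned) rather than sign-extended, and the slow-path branch becomes an unsigned "below" test against ClassStatus::kInitialized instead of a signed less-than or an equality test. A minimal C++ sketch of the check the generated code performs; the enumerator values here are hypothetical stand-ins, not the real ones from the ClassStatus definition:

    #include <cstdint>

    // Hypothetical 4-bit status values; stand-ins for the real ClassStatus
    // enumerators. Erroneous and in-progress states sort below kInitialized,
    // so "status >= kInitialized" means the class is initialized.
    enum class ClassStatus : uint8_t {
      kNotReady = 0,
      kErrorResolved = 2,
      kResolved = 10,
      kInitializing = 13,
      kInitialized = 14,
    };

    // The check every backend now emits: one unsigned byte load and one
    // unsigned "below" comparison. The load and the branch must agree on
    // signedness, which is why both change together in each hunk below.
    inline bool NeedsClinitSlowPath(uint8_t status_byte) {
      return status_byte < static_cast<uint8_t>(ClassStatus::kInitialized);
    }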
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index f9dcb5d6ef..13886b32b3 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -2103,9 +2103,8 @@ void InstructionCodeGeneratorARM64::GenerateClassInitializationCheck(SlowPathCod
   // TODO(vixl): Let the MacroAssembler handle MemOperand.
   __ Add(temp, class_reg, status_offset);
   __ Ldarb(temp, HeapOperand(temp));
-  __ Cmp(temp, mirror::Class::kStatusInitialized);
-  __ B(ne, slow_path->GetEntryLabel());
-  // Use Bne instead of Blt because ARM64 doesn't have Ldarsb.
+  __ Cmp(temp, enum_cast<>(ClassStatus::kInitialized));
+  __ B(lo, slow_path->GetEntryLabel());
   __ Bind(slow_path->GetExitLabel());
 }
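
The deleted comment records the old ARM64 constraint: Ldarb is a zero-extending load-acquire of a byte, and there is no sign-extending Ldarsb, so the signed status could only be tested for equality (Bne). With an unsigned status the zero-extended value orders correctly, and the unsigned lower branch (B.lo) replaces the equality test while keeping the acquire semantics of the load. A rough C++ equivalent, reusing the hypothetical kInitialized value from the sketch above:

    #include <atomic>
    #include <cstdint>

    constexpr uint8_t kInitialized = 14;  // Hypothetical value, as above.

    // Ldarb + Cmp + B.lo, approximated: an acquire byte load (zero-extending,
    // like Ldarb) followed by an unsigned comparison. The acquire load keeps
    // later reads of the class's statics ordered after the status read.
    inline bool IsClassInitialized(const std::atomic<uint8_t>& status) {
      return status.load(std::memory_order_acquire) >= kInitialized;
    }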
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index c6e1b042a7..7f8353312f 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -7173,12 +7173,12 @@ void InstructionCodeGeneratorARMVIXL::GenerateClassInitializationCheck(
     LoadClassSlowPathARMVIXL* slow_path, vixl32::Register class_reg) {
   UseScratchRegisterScope temps(GetVIXLAssembler());
   vixl32::Register temp = temps.Acquire();
-  GetAssembler()->LoadFromOffset(kLoadSignedByte,
+  GetAssembler()->LoadFromOffset(kLoadUnsignedByte,
                                  temp,
                                  class_reg,
                                  mirror::Class::StatusOffset().Int32Value());
-  __ Cmp(temp, mirror::Class::kStatusInitialized);
-  __ B(lt, slow_path->GetEntryLabel());
+  __ Cmp(temp, enum_cast<>(ClassStatus::kInitialized));
+  __ B(lo, slow_path->GetEntryLabel());
   // Even if the initialized flag is set, we may be in a situation where caches are not synced
   // properly. Therefore, we do a memory fence.
   __ Dmb(ISH);
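
On ARMv7 the status is read with a plain byte load, so the surviving comment does real work: the explicit Dmb(ISH) after the check supplies the ordering that Ldarb gives ARM64 for free. The load flips from kLoadSignedByte to kLoadUnsignedByte and the branch from lt (signed) to lo (unsigned), matching the other backends. A C++ analogue of load-then-barrier, sketched with a relaxed load plus a standalone acquire fence:

    #include <atomic>
    #include <cstdint>

    constexpr uint8_t kInitialized = 14;  // Hypothetical value, as above.

    // Plain (relaxed) byte load, unsigned compare, then a fence standing in
    // for Dmb ISH: on the fast path, reads of the initialized class's statics
    // must not be reordered before the status load.
    inline bool IsClassInitializedArm(const std::atomic<uint8_t>& status) {
      bool initialized = status.load(std::memory_order_relaxed) >= kInitialized;
      std::atomic_thread_fence(std::memory_order_acquire);
      return initialized;
    }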
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index c4772ad79f..ebe252a9c8 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -1915,9 +1915,9 @@ void CodeGeneratorMIPS::GenerateInvokeRuntime(int32_t entry_point_offset, bool d
 
 void InstructionCodeGeneratorMIPS::GenerateClassInitializationCheck(SlowPathCodeMIPS* slow_path,
                                                                     Register class_reg) {
-  __ LoadFromOffset(kLoadSignedByte, TMP, class_reg, mirror::Class::StatusOffset().Int32Value());
-  __ LoadConst32(AT, mirror::Class::kStatusInitialized);
-  __ Blt(TMP, AT, slow_path->GetEntryLabel());
+  __ LoadFromOffset(kLoadUnsignedByte, TMP, class_reg, mirror::Class::StatusOffset().Int32Value());
+  __ LoadConst32(AT, enum_cast<>(ClassStatus::kInitialized));
+  __ Bltu(TMP, AT, slow_path->GetEntryLabel());
   // Even if the initialized flag is set, we need to ensure consistent memory ordering.
   __ Sync(0);
   __ Bind(slow_path->GetExitLabel());
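
Blt compares as signed, Bltu as unsigned, and the load must match the branch or the same bit pattern is misjudged; the MIPS64 hunk below is the identical change using the compact branches Bltc/Bltuc. A small demonstration of why the pair changes together (0xF0 is only an illustrative bit pattern, and 14 is the hypothetical threshold used above):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint8_t raw = 0xF0;                           // Illustrative status byte.
      const int8_t as_signed = static_cast<int8_t>(raw);  // kLoadSignedByte view.
      const int kInitialized = 14;                        // Hypothetical value, as above.
      printf("Blt-style  (signed):   %d\n", as_signed < kInitialized);  // 1: -16 < 14.
      printf("Bltu-style (unsigned): %d\n", raw < kInitialized);        // 0: 240 >= 14.
      return 0;
    }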
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index c8891eddfc..3ea7b827bb 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -1761,9 +1761,9 @@ void CodeGeneratorMIPS64::GenerateInvokeRuntime(int32_t entry_point_offset) {
 
 void InstructionCodeGeneratorMIPS64::GenerateClassInitializationCheck(SlowPathCodeMIPS64* slow_path,
                                                                       GpuRegister class_reg) {
-  __ LoadFromOffset(kLoadSignedByte, TMP, class_reg, mirror::Class::StatusOffset().Int32Value());
-  __ LoadConst32(AT, mirror::Class::kStatusInitialized);
-  __ Bltc(TMP, AT, slow_path->GetEntryLabel());
+  __ LoadFromOffset(kLoadUnsignedByte, TMP, class_reg, mirror::Class::StatusOffset().Int32Value());
+  __ LoadConst32(AT, enum_cast<>(ClassStatus::kInitialized));
+  __ Bltuc(TMP, AT, slow_path->GetEntryLabel());
   // Even if the initialized flag is set, we need to ensure consistent memory ordering.
   __ Sync(0);
   __ Bind(slow_path->GetExitLabel());
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index ba222fe532..68532386e1 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -6220,8 +6220,8 @@ void InstructionCodeGeneratorX86::VisitClinitCheck(HClinitCheck* check) {
 void InstructionCodeGeneratorX86::GenerateClassInitializationCheck(
     SlowPathCode* slow_path, Register class_reg) {
   __ cmpb(Address(class_reg, mirror::Class::StatusOffset().Int32Value()),
-          Immediate(mirror::Class::kStatusInitialized));
-  __ j(kLess, slow_path->GetEntryLabel());
+          Immediate(enum_cast<>(ClassStatus::kInitialized)));
+  __ j(kBelow, slow_path->GetEntryLabel());
   __ Bind(slow_path->GetExitLabel());
   // No need for memory fence, thanks to the X86 memory model.
 }
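
kLess assembles to a signed jl and kBelow to an unsigned jb, so this is the same signedness fix; cmpb already compares a single byte, which is why no load had to change. The retained comment is the x86 counterpart of the Dmb/Sync lines above: x86 does not reorder loads with other loads, so the acquire ordering comes from the hardware and no fence is emitted. The x86-64 hunk below is identical. A sketch of that expectation, assuming typical codegen:

    #include <atomic>
    #include <cstdint>

    constexpr uint8_t kInitialized = 14;  // Hypothetical value, as above.

    // On x86, both functions typically compile to the same plain byte load
    // plus an unsigned compare (a cmpb/jb-style test); the acquire ordering
    // is supplied by the hardware memory model, not by an explicit fence.
    bool CheckAcquire(const std::atomic<uint8_t>& status) {
      return status.load(std::memory_order_acquire) >= kInitialized;
    }
    bool CheckRelaxed(const std::atomic<uint8_t>& status) {
      return status.load(std::memory_order_relaxed) >= kInitialized;
    }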
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index caad7885bd..1f8d822507 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -5426,8 +5426,8 @@ void ParallelMoveResolverX86_64::RestoreScratch(int reg) {
 void InstructionCodeGeneratorX86_64::GenerateClassInitializationCheck(
     SlowPathCode* slow_path, CpuRegister class_reg) {
   __ cmpb(Address(class_reg, mirror::Class::StatusOffset().Int32Value()),
-          Immediate(mirror::Class::kStatusInitialized));
-  __ j(kLess, slow_path->GetEntryLabel());
+          Immediate(enum_cast<>(ClassStatus::kInitialized)));
+  __ j(kBelow, slow_path->GetEntryLabel());
   __ Bind(slow_path->GetExitLabel());
   // No need for memory fence, thanks to the x86-64 memory model.
 }