author    | 2018-01-04 11:31:56 +0000
committer | 2018-01-05 11:27:50 +0000
commit    | 2c64a837e62c2839521c89060b5bb0dcb237ddda (patch)
tree      | 65475ed2e313ff17354e741bac7e9c85739b8b95 /compiler/optimizing
parent    | 6cd0005698181e4cef2247b632d396e605d58fa3 (diff)
Change ClassStatus to fit into 4 bits.
In preparation for extending the type check bit string from
24 to 28 bits, rewrite ClassStatus to fit into 4 bits. Also
perform a proper cleanup of the ClassStatus, i.e. change it
to an enum class, remove the "Status" word from enumerator
names, replace "Max" with "Last" in line with other
enumerations and remove aliases from mirror::Class.
Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Test: Pixel 2 XL boots.
Test: testrunner.py --target --optimizing
Bug: 64692057
Bug: 65318848
Change-Id: Iec1610ba5dac2c527b36c12819f132e1a77f2d45
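
To make the change concrete, here is a minimal sketch of a 4-bit enum class with an enum_cast-style helper. The enumerator names and numeric values are illustrative only, and the helper signature is an assumption; the real definitions live in the ART runtime sources, not in this diff.

```cpp
#include <cstdint>
#include <type_traits>

// Illustrative 4-bit status enum: every enumerator must stay in [0, 15] so
// the status can share its storage word with a wider type check bit string.
enum class ClassStatus : uint8_t {
  kNotReady = 0,
  // ... intermediate states elided ...
  kInitialized = 14,     // illustrative value, not taken from the sources
  kLast = kInitialized,  // "Last" rather than "Max", per the commit message
};
static_assert(static_cast<uint8_t>(ClassStatus::kLast) < 16,
              "ClassStatus must fit into 4 bits");

// Assumed shape of an enum_cast helper: converts an enum class value to its
// underlying integral type so it can be used as an immediate operand,
// e.g. enum_cast<>(ClassStatus::kInitialized) as in the hunks below.
template <typename Enum>
constexpr std::underlying_type_t<Enum> enum_cast(Enum value) {
  return static_cast<std::underlying_type_t<Enum>>(value);
}
```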
Diffstat (limited to 'compiler/optimizing')
-rw-r--r-- | compiler/optimizing/code_generator_arm64.cc    | 5
-rw-r--r-- | compiler/optimizing/code_generator_arm_vixl.cc | 6
-rw-r--r-- | compiler/optimizing/code_generator_mips.cc     | 6
-rw-r--r-- | compiler/optimizing/code_generator_mips64.cc   | 6
-rw-r--r-- | compiler/optimizing/code_generator_x86.cc      | 4
-rw-r--r-- | compiler/optimizing/code_generator_x86_64.cc   | 4
6 files changed, 15 insertions, 16 deletions
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index f9dcb5d6ef..13886b32b3 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -2103,9 +2103,8 @@ void InstructionCodeGeneratorARM64::GenerateClassInitializationCheck(SlowPathCod
   // TODO(vixl): Let the MacroAssembler handle MemOperand.
   __ Add(temp, class_reg, status_offset);
   __ Ldarb(temp, HeapOperand(temp));
-  __ Cmp(temp, mirror::Class::kStatusInitialized);
-  __ B(ne, slow_path->GetEntryLabel());
-  // Use Bne instead of Blt because ARM64 doesn't have Ldarsb.
+  __ Cmp(temp, enum_cast<>(ClassStatus::kInitialized));
+  __ B(lo, slow_path->GetEntryLabel());
   __ Bind(slow_path->GetExitLabel());
 }
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index c6e1b042a7..7f8353312f 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -7173,12 +7173,12 @@ void InstructionCodeGeneratorARMVIXL::GenerateClassInitializationCheck(
     LoadClassSlowPathARMVIXL* slow_path, vixl32::Register class_reg) {
   UseScratchRegisterScope temps(GetVIXLAssembler());
   vixl32::Register temp = temps.Acquire();
-  GetAssembler()->LoadFromOffset(kLoadSignedByte,
+  GetAssembler()->LoadFromOffset(kLoadUnsignedByte,
                                  temp,
                                  class_reg,
                                  mirror::Class::StatusOffset().Int32Value());
-  __ Cmp(temp, mirror::Class::kStatusInitialized);
-  __ B(lt, slow_path->GetEntryLabel());
+  __ Cmp(temp, enum_cast<>(ClassStatus::kInitialized));
+  __ B(lo, slow_path->GetEntryLabel());
   // Even if the initialized flag is set, we may be in a situation where caches are not synced
   // properly. Therefore, we do a memory fence.
   __ Dmb(ISH);
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index c4772ad79f..ebe252a9c8 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -1915,9 +1915,9 @@ void CodeGeneratorMIPS::GenerateInvokeRuntime(int32_t entry_point_offset, bool d
 void InstructionCodeGeneratorMIPS::GenerateClassInitializationCheck(SlowPathCodeMIPS* slow_path,
                                                                     Register class_reg) {
-  __ LoadFromOffset(kLoadSignedByte, TMP, class_reg, mirror::Class::StatusOffset().Int32Value());
-  __ LoadConst32(AT, mirror::Class::kStatusInitialized);
-  __ Blt(TMP, AT, slow_path->GetEntryLabel());
+  __ LoadFromOffset(kLoadUnsignedByte, TMP, class_reg, mirror::Class::StatusOffset().Int32Value());
+  __ LoadConst32(AT, enum_cast<>(ClassStatus::kInitialized));
+  __ Bltu(TMP, AT, slow_path->GetEntryLabel());
   // Even if the initialized flag is set, we need to ensure consistent memory ordering.
   __ Sync(0);
   __ Bind(slow_path->GetExitLabel());
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index c8891eddfc..3ea7b827bb 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -1761,9 +1761,9 @@ void CodeGeneratorMIPS64::GenerateInvokeRuntime(int32_t entry_point_offset) {
 void InstructionCodeGeneratorMIPS64::GenerateClassInitializationCheck(SlowPathCodeMIPS64* slow_path,
                                                                       GpuRegister class_reg) {
-  __ LoadFromOffset(kLoadSignedByte, TMP, class_reg, mirror::Class::StatusOffset().Int32Value());
-  __ LoadConst32(AT, mirror::Class::kStatusInitialized);
-  __ Bltc(TMP, AT, slow_path->GetEntryLabel());
+  __ LoadFromOffset(kLoadUnsignedByte, TMP, class_reg, mirror::Class::StatusOffset().Int32Value());
+  __ LoadConst32(AT, enum_cast<>(ClassStatus::kInitialized));
+  __ Bltuc(TMP, AT, slow_path->GetEntryLabel());
   // Even if the initialized flag is set, we need to ensure consistent memory ordering.
   __ Sync(0);
   __ Bind(slow_path->GetExitLabel());
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index ba222fe532..68532386e1 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -6220,8 +6220,8 @@ void InstructionCodeGeneratorX86::VisitClinitCheck(HClinitCheck* check) {
 void InstructionCodeGeneratorX86::GenerateClassInitializationCheck(
     SlowPathCode* slow_path, Register class_reg) {
   __ cmpb(Address(class_reg, mirror::Class::StatusOffset().Int32Value()),
-          Immediate(mirror::Class::kStatusInitialized));
-  __ j(kLess, slow_path->GetEntryLabel());
+          Immediate(enum_cast<>(ClassStatus::kInitialized)));
+  __ j(kBelow, slow_path->GetEntryLabel());
   __ Bind(slow_path->GetExitLabel());
   // No need for memory fence, thanks to the X86 memory model.
 }
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index caad7885bd..1f8d822507 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -5426,8 +5426,8 @@ void ParallelMoveResolverX86_64::RestoreScratch(int reg) {
 void InstructionCodeGeneratorX86_64::GenerateClassInitializationCheck(
     SlowPathCode* slow_path, CpuRegister class_reg) {
   __ cmpb(Address(class_reg, mirror::Class::StatusOffset().Int32Value()),
-          Immediate(mirror::Class::kStatusInitialized));
-  __ j(kLess, slow_path->GetEntryLabel());
+          Immediate(enum_cast<>(ClassStatus::kInitialized)));
+  __ j(kBelow, slow_path->GetEntryLabel());
   __ Bind(slow_path->GetExitLabel());
   // No need for memory fence, thanks to the x86-64 memory model.
 }
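
Each back end ends up with the same check: the class status byte is compared unsigned against ClassStatus::kInitialized, and the slow path is taken only when the status is below it. On the RISC back ends this also means switching the load from sign-extended to zero-extended (kLoadSignedByte to kLoadUnsignedByte), and the condition codes change accordingly (lt to lo, Blt/Bltc to Bltu/Bltuc, kLess to kBelow). A rough, hypothetical C++ equivalent of the emitted check (the names and the threshold value are assumptions, not from the sources):

```cpp
#include <cstdint>

// Illustrative threshold mirroring enum_cast<>(ClassStatus::kInitialized).
constexpr uint8_t kInitializedThreshold = 14;  // assumed value, for illustration

// Rough C-level equivalent of the generated code: read the status byte as an
// unsigned value and take the slow path only while the class is not initialized.
inline bool NeedsClinitSlowPath(const uint8_t* status_byte) {
  // Unsigned compare, matching B(lo) / Bltu(c) / j(kBelow) in the diff above.
  return *status_byte < kInitializedThreshold;
}
```

Note that the arm64 check also changes shape: it previously required an exact match with kStatusInitialized (B(ne)), whereas the new unsigned form treats any status at or above kInitialized as initialized, matching the other back ends.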