summaryrefslogtreecommitdiff
path: root/compiler/optimizing
diff options
context:
space:
mode:
Diffstat (limited to 'compiler/optimizing')
-rw-r--r--compiler/optimizing/code_generator_arm.cc8
-rw-r--r--compiler/optimizing/code_generator_arm64.cc10
-rw-r--r--compiler/optimizing/code_generator_mips.cc221
-rw-r--r--compiler/optimizing/code_generator_mips64.cc11
-rw-r--r--compiler/optimizing/code_generator_x86.cc7
-rw-r--r--compiler/optimizing/code_generator_x86_64.cc14
-rw-r--r--compiler/optimizing/nodes.h6
-rw-r--r--compiler/optimizing/optimizing_cfi_test_expected.inc317
8 files changed, 368 insertions, 226 deletions
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 9870876879..77d6f23fff 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1129,7 +1129,13 @@ void CodeGeneratorARM::GenerateFrameEntry() {
int adjust = GetFrameSize() - FrameEntrySpillSize();
__ AddConstant(SP, -adjust);
__ cfi().AdjustCFAOffset(adjust);
- __ StoreToOffset(kStoreWord, kMethodRegisterArgument, SP, 0);
+
+ // Save the current method if we need it. Note that we do not
+ // do this in HCurrentMethod, as the instruction might have been removed
+ // in the SSA graph.
+ if (RequiresCurrentMethod()) {
+ __ StoreToOffset(kStoreWord, kMethodRegisterArgument, SP, 0);
+ }
}
void CodeGeneratorARM::GenerateFrameExit() {
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 969d653f97..f02b028541 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1046,7 +1046,15 @@ void CodeGeneratorARM64::GenerateFrameEntry() {
// ... : other preserved fp registers.
// ... : reserved frame space.
// sp[0] : current method.
- __ Str(kArtMethodRegister, MemOperand(sp, -frame_size, PreIndex));
+
+ // Save the current method if we need it. Note that we do not
+ // do this in HCurrentMethod, as the instruction might have been removed
+ // in the SSA graph.
+ if (RequiresCurrentMethod()) {
+ __ Str(kArtMethodRegister, MemOperand(sp, -frame_size, PreIndex));
+ } else {
+ __ Claim(frame_size);
+ }
GetAssembler()->cfi().AdjustCFAOffset(frame_size);
GetAssembler()->SpillRegisters(GetFramePreservedCoreRegisters(),
frame_size - GetCoreSpillSize());
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 990bbcc85b..e336df8c6c 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -743,9 +743,12 @@ void CodeGeneratorMIPS::GenerateFrameEntry() {
// TODO: __ cfi().RelOffset(DWARFReg(reg), ofs);
}
- // Store the current method pointer.
- // TODO: can we not do this if RequiresCurrentMethod() returns false?
- __ StoreToOffset(kStoreWord, kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
+ // Save the current method if we need it. Note that we do not
+ // do this in HCurrentMethod, as the instruction might have been removed
+ // in the SSA graph.
+ if (RequiresCurrentMethod()) {
+ __ StoreToOffset(kStoreWord, kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
+ }
}
void CodeGeneratorMIPS::GenerateFrameExit() {
@@ -2252,6 +2255,11 @@ void LocationsBuilderMIPS::VisitCompare(HCompare* compare) {
case Primitive::kPrimShort:
case Primitive::kPrimChar:
case Primitive::kPrimInt:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
case Primitive::kPrimLong:
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
@@ -2820,19 +2828,36 @@ void InstructionCodeGeneratorMIPS::GenerateIntCompare(IfCondition cond,
switch (cond) {
case kCondEQ:
case kCondNE:
- if (use_imm && IsUint<16>(rhs_imm)) {
- __ Xori(dst, lhs, rhs_imm);
- } else {
- if (use_imm) {
- rhs_reg = TMP;
- __ LoadConst32(rhs_reg, rhs_imm);
+ if (use_imm && IsInt<16>(-rhs_imm)) {
+ if (rhs_imm == 0) {
+ if (cond == kCondEQ) {
+ __ Sltiu(dst, lhs, 1);
+ } else {
+ __ Sltu(dst, ZERO, lhs);
+ }
+ } else {
+ __ Addiu(dst, lhs, -rhs_imm);
+ if (cond == kCondEQ) {
+ __ Sltiu(dst, dst, 1);
+ } else {
+ __ Sltu(dst, ZERO, dst);
+ }
}
- __ Xor(dst, lhs, rhs_reg);
- }
- if (cond == kCondEQ) {
- __ Sltiu(dst, dst, 1);
} else {
- __ Sltu(dst, ZERO, dst);
+ if (use_imm && IsUint<16>(rhs_imm)) {
+ __ Xori(dst, lhs, rhs_imm);
+ } else {
+ if (use_imm) {
+ rhs_reg = TMP;
+ __ LoadConst32(rhs_reg, rhs_imm);
+ }
+ __ Xor(dst, lhs, rhs_reg);
+ }
+ if (cond == kCondEQ) {
+ __ Sltiu(dst, dst, 1);
+ } else {
+ __ Sltu(dst, ZERO, dst);
+ }
}
break;
@@ -2938,7 +2963,7 @@ void InstructionCodeGeneratorMIPS::GenerateIntCompareAndBranch(IfCondition cond,
Register lhs = locations->InAt(0).AsRegister<Register>();
Location rhs_location = locations->InAt(1);
Register rhs_reg = ZERO;
- int32_t rhs_imm = 0;
+ int64_t rhs_imm = 0;
bool use_imm = rhs_location.IsConstant();
if (use_imm) {
rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
@@ -2975,42 +3000,136 @@ void InstructionCodeGeneratorMIPS::GenerateIntCompareAndBranch(IfCondition cond,
break;
}
} else {
- if (use_imm) {
- // TODO: more efficient comparison with 16-bit constants without loading them into TMP.
- rhs_reg = TMP;
- __ LoadConst32(rhs_reg, rhs_imm);
- }
- switch (cond) {
- case kCondEQ:
- __ Beq(lhs, rhs_reg, label);
- break;
- case kCondNE:
- __ Bne(lhs, rhs_reg, label);
- break;
- case kCondLT:
- __ Blt(lhs, rhs_reg, label);
- break;
- case kCondGE:
- __ Bge(lhs, rhs_reg, label);
- break;
- case kCondLE:
- __ Bge(rhs_reg, lhs, label);
- break;
- case kCondGT:
- __ Blt(rhs_reg, lhs, label);
- break;
- case kCondB:
- __ Bltu(lhs, rhs_reg, label);
- break;
- case kCondAE:
- __ Bgeu(lhs, rhs_reg, label);
- break;
- case kCondBE:
- __ Bgeu(rhs_reg, lhs, label);
- break;
- case kCondA:
- __ Bltu(rhs_reg, lhs, label);
- break;
+ bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
+ if (isR6 || !use_imm) {
+ if (use_imm) {
+ rhs_reg = TMP;
+ __ LoadConst32(rhs_reg, rhs_imm);
+ }
+ switch (cond) {
+ case kCondEQ:
+ __ Beq(lhs, rhs_reg, label);
+ break;
+ case kCondNE:
+ __ Bne(lhs, rhs_reg, label);
+ break;
+ case kCondLT:
+ __ Blt(lhs, rhs_reg, label);
+ break;
+ case kCondGE:
+ __ Bge(lhs, rhs_reg, label);
+ break;
+ case kCondLE:
+ __ Bge(rhs_reg, lhs, label);
+ break;
+ case kCondGT:
+ __ Blt(rhs_reg, lhs, label);
+ break;
+ case kCondB:
+ __ Bltu(lhs, rhs_reg, label);
+ break;
+ case kCondAE:
+ __ Bgeu(lhs, rhs_reg, label);
+ break;
+ case kCondBE:
+ __ Bgeu(rhs_reg, lhs, label);
+ break;
+ case kCondA:
+ __ Bltu(rhs_reg, lhs, label);
+ break;
+ }
+ } else {
+ // Special cases for more efficient comparison with constants on R2.
+ switch (cond) {
+ case kCondEQ:
+ __ LoadConst32(TMP, rhs_imm);
+ __ Beq(lhs, TMP, label);
+ break;
+ case kCondNE:
+ __ LoadConst32(TMP, rhs_imm);
+ __ Bne(lhs, TMP, label);
+ break;
+ case kCondLT:
+ if (IsInt<16>(rhs_imm)) {
+ __ Slti(TMP, lhs, rhs_imm);
+ __ Bnez(TMP, label);
+ } else {
+ __ LoadConst32(TMP, rhs_imm);
+ __ Blt(lhs, TMP, label);
+ }
+ break;
+ case kCondGE:
+ if (IsInt<16>(rhs_imm)) {
+ __ Slti(TMP, lhs, rhs_imm);
+ __ Beqz(TMP, label);
+ } else {
+ __ LoadConst32(TMP, rhs_imm);
+ __ Bge(lhs, TMP, label);
+ }
+ break;
+ case kCondLE:
+ if (IsInt<16>(rhs_imm + 1)) {
+ // Simulate lhs <= rhs via lhs < rhs + 1.
+ __ Slti(TMP, lhs, rhs_imm + 1);
+ __ Bnez(TMP, label);
+ } else {
+ __ LoadConst32(TMP, rhs_imm);
+ __ Bge(TMP, lhs, label);
+ }
+ break;
+ case kCondGT:
+ if (IsInt<16>(rhs_imm + 1)) {
+ // Simulate lhs > rhs via !(lhs < rhs + 1).
+ __ Slti(TMP, lhs, rhs_imm + 1);
+ __ Beqz(TMP, label);
+ } else {
+ __ LoadConst32(TMP, rhs_imm);
+ __ Blt(TMP, lhs, label);
+ }
+ break;
+ case kCondB:
+ if (IsInt<16>(rhs_imm)) {
+ __ Sltiu(TMP, lhs, rhs_imm);
+ __ Bnez(TMP, label);
+ } else {
+ __ LoadConst32(TMP, rhs_imm);
+ __ Bltu(lhs, TMP, label);
+ }
+ break;
+ case kCondAE:
+ if (IsInt<16>(rhs_imm)) {
+ __ Sltiu(TMP, lhs, rhs_imm);
+ __ Beqz(TMP, label);
+ } else {
+ __ LoadConst32(TMP, rhs_imm);
+ __ Bgeu(lhs, TMP, label);
+ }
+ break;
+ case kCondBE:
+ if ((rhs_imm != -1) && IsInt<16>(rhs_imm + 1)) {
+ // Simulate lhs <= rhs via lhs < rhs + 1.
+ // Note that this only works if rhs + 1 does not overflow
+ // to 0, hence the check above.
+ __ Sltiu(TMP, lhs, rhs_imm + 1);
+ __ Bnez(TMP, label);
+ } else {
+ __ LoadConst32(TMP, rhs_imm);
+ __ Bgeu(TMP, lhs, label);
+ }
+ break;
+ case kCondA:
+ if ((rhs_imm != -1) && IsInt<16>(rhs_imm + 1)) {
+ // Simulate lhs > rhs via !(lhs < rhs + 1).
+ // Note that this only works if rhs + 1 does not overflow
+ // to 0, hence the check above.
+ __ Sltiu(TMP, lhs, rhs_imm + 1);
+ __ Beqz(TMP, label);
+ } else {
+ __ LoadConst32(TMP, rhs_imm);
+ __ Bltu(TMP, lhs, label);
+ }
+ break;
+ }
}
}
}
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 02576bda67..010bf24232 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -556,9 +556,14 @@ void CodeGeneratorMIPS64::GenerateFrameEntry() {
__ IncreaseFrameSize(GetFrameSize() - FrameEntrySpillSize());
- static_assert(IsInt<16>(kCurrentMethodStackOffset),
- "kCurrentMethodStackOffset must fit into int16_t");
- __ Sd(kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
+ // Save the current method if we need it. Note that we do not
+ // do this in HCurrentMethod, as the instruction might have been removed
+ // in the SSA graph.
+ if (RequiresCurrentMethod()) {
+ static_assert(IsInt<16>(kCurrentMethodStackOffset),
+ "kCurrentMethodStackOffset must fit into int16_t");
+ __ Sd(kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
+ }
}
void CodeGeneratorMIPS64::GenerateFrameExit() {
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 0b23599665..960f01ce9d 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -898,7 +898,12 @@ void CodeGeneratorX86::GenerateFrameEntry() {
int adjust = GetFrameSize() - FrameEntrySpillSize();
__ subl(ESP, Immediate(adjust));
__ cfi().AdjustCFAOffset(adjust);
- __ movl(Address(ESP, kCurrentMethodStackOffset), kMethodRegisterArgument);
+ // Save the current method if we need it. Note that we do not
+ // do this in HCurrentMethod, as the instruction might have been removed
+ // in the SSA graph.
+ if (RequiresCurrentMethod()) {
+ __ movl(Address(ESP, kCurrentMethodStackOffset), kMethodRegisterArgument);
+ }
}
void CodeGeneratorX86::GenerateFrameExit() {
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 28638d721d..5cabc8fa06 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -457,7 +457,8 @@ class ReadBarrierMarkSlowPathX86_64 : public SlowPathCode {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
- Register reg = obj_.AsRegister<Register>();
+ CpuRegister cpu_reg = obj_.AsRegister<CpuRegister>();
+ Register reg = cpu_reg.AsRegister();
DCHECK(locations->CanCall());
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg));
DCHECK(instruction_->IsInstanceFieldGet() ||
@@ -476,7 +477,7 @@ class ReadBarrierMarkSlowPathX86_64 : public SlowPathCode {
__ Bind(GetEntryLabel());
if (unpoison_) {
// Object* ref = ref_addr->AsMirrorPtr()
- __ MaybeUnpoisonHeapReference(obj_.AsRegister<CpuRegister>());
+ __ MaybeUnpoisonHeapReference(cpu_reg);
}
// No need to save live registers; it's taken care of by the
// entrypoint. Also, there is no need to update the stack mask,
@@ -1140,8 +1141,13 @@ void CodeGeneratorX86_64::GenerateFrameEntry() {
}
}
- __ movq(Address(CpuRegister(RSP), kCurrentMethodStackOffset),
- CpuRegister(kMethodRegisterArgument));
+ // Save the current method if we need it. Note that we do not
+ // do this in HCurrentMethod, as the instruction might have been removed
+ // in the SSA graph.
+ if (RequiresCurrentMethod()) {
+ __ movq(Address(CpuRegister(RSP), kCurrentMethodStackOffset),
+ CpuRegister(kMethodRegisterArgument));
+ }
}
void CodeGeneratorX86_64::GenerateFrameExit() {
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index daefc3c3aa..6f4f3c9505 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -2093,10 +2093,10 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
// to the current method. Such instructions are:
// (1): Instructions that require an environment, as calling the runtime requires
// to walk the stack and have the current method stored at a specific stack address.
- // (2): Object literals like classes and strings, that are loaded from the dex cache
- // fields of the current method.
+ // (2): HCurrentMethod, potentially used by HInvokeStaticOrDirect, HLoadString, or HLoadClass
+ // to access the dex cache.
bool NeedsCurrentMethod() const {
- return NeedsEnvironment() || IsLoadClass() || IsLoadString();
+ return NeedsEnvironment() || IsCurrentMethod();
}
// Returns whether the code generation of the instruction will require to have access
diff --git a/compiler/optimizing/optimizing_cfi_test_expected.inc b/compiler/optimizing/optimizing_cfi_test_expected.inc
index 6c5030c9cb..f735dc8cb3 100644
--- a/compiler/optimizing/optimizing_cfi_test_expected.inc
+++ b/compiler/optimizing/optimizing_cfi_test_expected.inc
@@ -1,10 +1,10 @@
static constexpr uint8_t expected_asm_kThumb2[] = {
- 0x60, 0xB5, 0x2D, 0xED, 0x02, 0x8A, 0x8B, 0xB0, 0x00, 0x90, 0x0B, 0xB0,
+ 0x60, 0xB5, 0x2D, 0xED, 0x02, 0x8A, 0x8B, 0xB0, 0x0B, 0xB0,
0xBD, 0xEC, 0x02, 0x8A, 0x60, 0xBD,
};
static constexpr uint8_t expected_cfi_kThumb2[] = {
0x42, 0x0E, 0x0C, 0x85, 0x03, 0x86, 0x02, 0x8E, 0x01, 0x44, 0x0E, 0x14,
- 0x05, 0x50, 0x05, 0x05, 0x51, 0x04, 0x42, 0x0E, 0x40, 0x42, 0x0A, 0x42,
+ 0x05, 0x50, 0x05, 0x05, 0x51, 0x04, 0x42, 0x0E, 0x40, 0x0A, 0x42,
0x0E, 0x14, 0x44, 0x0E, 0x0C, 0x06, 0x50, 0x06, 0x51, 0x42, 0x0B, 0x0E,
0x40,
};
@@ -19,20 +19,19 @@ static constexpr uint8_t expected_cfi_kThumb2[] = {
// 0x00000006: .cfi_offset_extended: r81 at cfa-16
// 0x00000006: sub sp, sp, #44
// 0x00000008: .cfi_def_cfa_offset: 64
-// 0x00000008: str r0, [sp, #0]
-// 0x0000000a: .cfi_remember_state
-// 0x0000000a: add sp, sp, #44
-// 0x0000000c: .cfi_def_cfa_offset: 20
-// 0x0000000c: vpop.f32 {s16-s17}
-// 0x00000010: .cfi_def_cfa_offset: 12
-// 0x00000010: .cfi_restore_extended: r80
-// 0x00000010: .cfi_restore_extended: r81
-// 0x00000010: pop {r5, r6, pc}
-// 0x00000012: .cfi_restore_state
-// 0x00000012: .cfi_def_cfa_offset: 64
+// 0x00000008: .cfi_remember_state
+// 0x00000008: add sp, sp, #44
+// 0x0000000a: .cfi_def_cfa_offset: 20
+// 0x0000000a: vpop.f32 {s16-s17}
+// 0x0000000e: .cfi_def_cfa_offset: 12
+// 0x0000000e: .cfi_restore_extended: r80
+// 0x0000000e: .cfi_restore_extended: r81
+// 0x0000000e: pop {r5, r6, pc}
+// 0x00000010: .cfi_restore_state
+// 0x00000010: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kArm64[] = {
- 0xE0, 0x0F, 0x1C, 0xF8, 0xF4, 0x17, 0x00, 0xF9, 0xF5, 0x7B, 0x03, 0xA9,
+ 0xFF, 0x03, 0x01, 0xD1, 0xF4, 0x17, 0x00, 0xF9, 0xF5, 0x7B, 0x03, 0xA9,
0xE8, 0xA7, 0x01, 0x6D, 0xE8, 0xA7, 0x41, 0x6D, 0xF4, 0x17, 0x40, 0xF9,
0xF5, 0x7B, 0x43, 0xA9, 0xFF, 0x03, 0x01, 0x91, 0xC0, 0x03, 0x5F, 0xD6,
};
@@ -41,7 +40,7 @@ static constexpr uint8_t expected_cfi_kArm64[] = {
0x05, 0x48, 0x0A, 0x05, 0x49, 0x08, 0x0A, 0x44, 0x06, 0x48, 0x06, 0x49,
0x44, 0xD4, 0x44, 0xD5, 0xDE, 0x44, 0x0E, 0x00, 0x44, 0x0B, 0x0E, 0x40,
};
-// 0x00000000: str x0, [sp, #-64]!
+// 0x00000000: sub sp, sp, #0x40 (64)
// 0x00000004: .cfi_def_cfa_offset: 64
// 0x00000004: str x20, [sp, #40]
// 0x00000008: .cfi_offset: r20 at cfa-24
@@ -67,12 +66,12 @@ static constexpr uint8_t expected_cfi_kArm64[] = {
// 0x00000024: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kX86[] = {
- 0x56, 0x55, 0x83, 0xEC, 0x34, 0x89, 0x04, 0x24, 0x83, 0xC4, 0x34, 0x5D,
+ 0x56, 0x55, 0x83, 0xEC, 0x34, 0x83, 0xC4, 0x34, 0x5D,
0x5E, 0xC3,
};
static constexpr uint8_t expected_cfi_kX86[] = {
0x41, 0x0E, 0x08, 0x86, 0x02, 0x41, 0x0E, 0x0C, 0x85, 0x03, 0x43, 0x0E,
- 0x40, 0x43, 0x0A, 0x43, 0x0E, 0x0C, 0x41, 0x0E, 0x08, 0xC5, 0x41, 0x0E,
+ 0x40, 0x0A, 0x43, 0x0E, 0x0C, 0x41, 0x0E, 0x08, 0xC5, 0x41, 0x0E,
0x04, 0xC6, 0x41, 0x0B, 0x0E, 0x40,
};
// 0x00000000: push esi
@@ -83,29 +82,28 @@ static constexpr uint8_t expected_cfi_kX86[] = {
// 0x00000002: .cfi_offset: r5 at cfa-12
// 0x00000002: sub esp, 52
// 0x00000005: .cfi_def_cfa_offset: 64
-// 0x00000005: mov [esp], eax
-// 0x00000008: .cfi_remember_state
-// 0x00000008: add esp, 52
-// 0x0000000b: .cfi_def_cfa_offset: 12
-// 0x0000000b: pop ebp
-// 0x0000000c: .cfi_def_cfa_offset: 8
-// 0x0000000c: .cfi_restore: r5
-// 0x0000000c: pop esi
-// 0x0000000d: .cfi_def_cfa_offset: 4
-// 0x0000000d: .cfi_restore: r6
-// 0x0000000d: ret
-// 0x0000000e: .cfi_restore_state
-// 0x0000000e: .cfi_def_cfa_offset: 64
+// 0x00000005: .cfi_remember_state
+// 0x00000005: add esp, 52
+// 0x00000008: .cfi_def_cfa_offset: 12
+// 0x00000008: pop ebp
+// 0x00000009: .cfi_def_cfa_offset: 8
+// 0x00000009: .cfi_restore: r5
+// 0x00000009: pop esi
+// 0x0000000a: .cfi_def_cfa_offset: 4
+// 0x0000000a: .cfi_restore: r6
+// 0x0000000a: ret
+// 0x0000000b: .cfi_restore_state
+// 0x0000000b: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kX86_64[] = {
0x55, 0x53, 0x48, 0x83, 0xEC, 0x28, 0xF2, 0x44, 0x0F, 0x11, 0x6C, 0x24,
- 0x20, 0xF2, 0x44, 0x0F, 0x11, 0x64, 0x24, 0x18, 0x48, 0x89, 0x3C, 0x24,
+ 0x20, 0xF2, 0x44, 0x0F, 0x11, 0x64, 0x24, 0x18,
0xF2, 0x44, 0x0F, 0x10, 0x64, 0x24, 0x18, 0xF2, 0x44, 0x0F, 0x10, 0x6C,
0x24, 0x20, 0x48, 0x83, 0xC4, 0x28, 0x5B, 0x5D, 0xC3,
};
static constexpr uint8_t expected_cfi_kX86_64[] = {
0x41, 0x0E, 0x10, 0x86, 0x04, 0x41, 0x0E, 0x18, 0x83, 0x06, 0x44, 0x0E,
- 0x40, 0x47, 0x9E, 0x08, 0x47, 0x9D, 0x0A, 0x44, 0x0A, 0x47, 0xDD, 0x47,
+ 0x40, 0x47, 0x9E, 0x08, 0x47, 0x9D, 0x0A, 0x0A, 0x47, 0xDD, 0x47,
0xDE, 0x44, 0x0E, 0x18, 0x41, 0x0E, 0x10, 0xC3, 0x41, 0x0E, 0x08, 0xC6,
0x41, 0x0B, 0x0E, 0x40,
};
@@ -121,34 +119,33 @@ static constexpr uint8_t expected_cfi_kX86_64[] = {
// 0x0000000d: .cfi_offset: r30 at cfa-32
// 0x0000000d: movsd [rsp + 24], xmm12
// 0x00000014: .cfi_offset: r29 at cfa-40
-// 0x00000014: movq [rsp], rdi
-// 0x00000018: .cfi_remember_state
-// 0x00000018: movsd xmm12, [rsp + 24]
-// 0x0000001f: .cfi_restore: r29
-// 0x0000001f: movsd xmm13, [rsp + 32]
-// 0x00000026: .cfi_restore: r30
-// 0x00000026: addq rsp, 40
-// 0x0000002a: .cfi_def_cfa_offset: 24
-// 0x0000002a: pop rbx
-// 0x0000002b: .cfi_def_cfa_offset: 16
-// 0x0000002b: .cfi_restore: r3
-// 0x0000002b: pop rbp
-// 0x0000002c: .cfi_def_cfa_offset: 8
-// 0x0000002c: .cfi_restore: r6
-// 0x0000002c: ret
-// 0x0000002d: .cfi_restore_state
-// 0x0000002d: .cfi_def_cfa_offset: 64
+// 0x00000014: .cfi_remember_state
+// 0x00000014: movsd xmm12, [rsp + 24]
+// 0x0000001b: .cfi_restore: r29
+// 0x0000001b: movsd xmm13, [rsp + 32]
+// 0x00000022: .cfi_restore: r30
+// 0x00000022: addq rsp, 40
+// 0x00000026: .cfi_def_cfa_offset: 24
+// 0x00000026: pop rbx
+// 0x00000027: .cfi_def_cfa_offset: 16
+// 0x00000027: .cfi_restore: r3
+// 0x00000027: pop rbp
+// 0x00000028: .cfi_def_cfa_offset: 8
+// 0x00000028: .cfi_restore: r6
+// 0x00000028: ret
+// 0x00000029: .cfi_restore_state
+// 0x00000029: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kMips[] = {
0xC0, 0xFF, 0xBD, 0x27, 0x3C, 0x00, 0xBF, 0xAF, 0x38, 0x00, 0xB1, 0xAF,
0x34, 0x00, 0xB0, 0xAF, 0x28, 0x00, 0xB6, 0xF7, 0x20, 0x00, 0xB4, 0xF7,
- 0x00, 0x00, 0xA4, 0xAF, 0x3C, 0x00, 0xBF, 0x8F, 0x38, 0x00, 0xB1, 0x8F,
+ 0x3C, 0x00, 0xBF, 0x8F, 0x38, 0x00, 0xB1, 0x8F,
0x34, 0x00, 0xB0, 0x8F, 0x28, 0x00, 0xB6, 0xD7, 0x20, 0x00, 0xB4, 0xD7,
0x09, 0x00, 0xE0, 0x03, 0x40, 0x00, 0xBD, 0x27,
};
static constexpr uint8_t expected_cfi_kMips[] = {
0x44, 0x0E, 0x40, 0x44, 0x9F, 0x01, 0x44, 0x91, 0x02, 0x44, 0x90, 0x03,
- 0x4C, 0x0A, 0x44, 0xDF, 0x44, 0xD1, 0x44, 0xD0, 0x50, 0x0E, 0x00, 0x0B,
+ 0x48, 0x0A, 0x44, 0xDF, 0x44, 0xD1, 0x44, 0xD0, 0x50, 0x0E, 0x00, 0x0B,
0x0E, 0x40,
};
// 0x00000000: addiu r29, r29, -64
@@ -161,33 +158,33 @@ static constexpr uint8_t expected_cfi_kMips[] = {
// 0x00000010: .cfi_offset: r16 at cfa-12
// 0x00000010: sdc1 f22, +40(r29)
// 0x00000014: sdc1 f20, +32(r29)
-// 0x00000018: sw r4, +0(r29)
-// 0x0000001c: .cfi_remember_state
-// 0x0000001c: lw r31, +60(r29)
-// 0x00000020: .cfi_restore: r31
-// 0x00000020: lw r17, +56(r29)
-// 0x00000024: .cfi_restore: r17
-// 0x00000024: lw r16, +52(r29)
-// 0x00000028: .cfi_restore: r16
-// 0x00000028: ldc1 f22, +40(r29)
-// 0x0000002c: ldc1 f20, +32(r29)
-// 0x00000030: jr r31
-// 0x00000034: addiu r29, r29, 64
-// 0x00000038: .cfi_def_cfa_offset: 0
-// 0x00000038: .cfi_restore_state
-// 0x00000038: .cfi_def_cfa_offset: 64
+// 0x00000018: .cfi_remember_state
+// 0x00000018: lw r31, +60(r29)
+// 0x0000001c: .cfi_restore: r31
+// 0x0000001c: lw r17, +56(r29)
+// 0x00000020: .cfi_restore: r17
+// 0x00000020: lw r16, +52(r29)
+// 0x00000024: .cfi_restore: r16
+// 0x00000024: ldc1 f22, +40(r29)
+// 0x00000028: ldc1 f20, +32(r29)
+// 0x0000002c: jr r31
+// 0x00000030: addiu r29, r29, 64
+// 0x00000034: .cfi_def_cfa_offset: 0
+// 0x00000034: .cfi_restore_state
+// 0x00000034: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kMips64[] = {
0xD8, 0xFF, 0xBD, 0x67, 0x20, 0x00, 0xBF, 0xFF, 0x18, 0x00, 0xB1, 0xFF,
0x10, 0x00, 0xB0, 0xFF, 0x08, 0x00, 0xB9, 0xF7, 0x00, 0x00, 0xB8, 0xF7,
- 0xE8, 0xFF, 0xBD, 0x67, 0x00, 0x00, 0xA4, 0xFF, 0x18, 0x00, 0xBD, 0x67,
+ 0xE8, 0xFF, 0xBD, 0x67, 0x18, 0x00, 0xBD, 0x67,
0x00, 0x00, 0xB8, 0xD7, 0x08, 0x00, 0xB9, 0xD7, 0x10, 0x00, 0xB0, 0xDF,
0x18, 0x00, 0xB1, 0xDF, 0x20, 0x00, 0xBF, 0xDF, 0x28, 0x00, 0xBD, 0x67,
0x09, 0x00, 0xE0, 0x03, 0x00, 0x00, 0x00, 0x00,
};
+
static constexpr uint8_t expected_cfi_kMips64[] = {
0x44, 0x0E, 0x28, 0x44, 0x9F, 0x02, 0x44, 0x91, 0x04, 0x44, 0x90, 0x06,
- 0x44, 0xB9, 0x08, 0x44, 0xB8, 0x0A, 0x44, 0x0E, 0x40, 0x44, 0x0A, 0x44,
+ 0x44, 0xB9, 0x08, 0x44, 0xB8, 0x0A, 0x44, 0x0E, 0x40, 0x0A, 0x44,
0x0E, 0x28, 0x44, 0xF8, 0x44, 0xF9, 0x44, 0xD0, 0x44, 0xD1, 0x44, 0xDF,
0x44, 0x0E, 0x00, 0x48, 0x0B, 0x0E, 0x40,
};
@@ -205,29 +202,28 @@ static constexpr uint8_t expected_cfi_kMips64[] = {
// 0x00000018: .cfi_offset: r56 at cfa-40
// 0x00000018: daddiu r29, r29, -24
// 0x0000001c: .cfi_def_cfa_offset: 64
-// 0x0000001c: sd r4, +0(r29)
-// 0x00000020: .cfi_remember_state
-// 0x00000020: daddiu r29, r29, 24
-// 0x00000024: .cfi_def_cfa_offset: 40
-// 0x00000024: ldc1 f24, +0(r29)
-// 0x00000028: .cfi_restore: r56
-// 0x00000028: ldc1 f25, +8(r29)
-// 0x0000002c: .cfi_restore: r57
-// 0x0000002c: ld r16, +16(r29)
-// 0x00000030: .cfi_restore: r16
-// 0x00000030: ld r17, +24(r29)
-// 0x00000034: .cfi_restore: r17
-// 0x00000034: ld r31, +32(r29)
-// 0x00000038: .cfi_restore: r31
-// 0x00000038: daddiu r29, r29, 40
-// 0x0000003c: .cfi_def_cfa_offset: 0
-// 0x0000003c: jr r31
-// 0x00000040: nop
-// 0x00000044: .cfi_restore_state
-// 0x00000044: .cfi_def_cfa_offset: 64
+// 0x0000001c: .cfi_remember_state
+// 0x0000001c: daddiu r29, r29, 24
+// 0x00000020: .cfi_def_cfa_offset: 40
+// 0x00000020: ldc1 f24, +0(r29)
+// 0x00000024: .cfi_restore: r56
+// 0x00000024: ldc1 f25, +8(r29)
+// 0x00000028: .cfi_restore: r57
+// 0x00000028: ld r16, +16(r29)
+// 0x0000002c: .cfi_restore: r16
+// 0x0000002c: ld r17, +24(r29)
+// 0x00000030: .cfi_restore: r17
+// 0x00000030: ld r31, +32(r29)
+// 0x00000034: .cfi_restore: r31
+// 0x00000034: daddiu r29, r29, 40
+// 0x00000038: .cfi_def_cfa_offset: 0
+// 0x00000038: jr r31
+// 0x0000003c: nop
+// 0x00000040: .cfi_restore_state
+// 0x00000040: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kThumb2_adjust[] = {
- 0x60, 0xB5, 0x2D, 0xED, 0x02, 0x8A, 0x8B, 0xB0, 0x00, 0x90, 0x00, 0x28,
+ 0x60, 0xB5, 0x2D, 0xED, 0x02, 0x8A, 0x8B, 0xB0, 0x00, 0x28,
0x40, 0xD0, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68,
0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68,
0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68,
@@ -243,7 +239,7 @@ static constexpr uint8_t expected_asm_kThumb2_adjust[] = {
};
static constexpr uint8_t expected_cfi_kThumb2_adjust[] = {
0x42, 0x0E, 0x0C, 0x85, 0x03, 0x86, 0x02, 0x8E, 0x01, 0x44, 0x0E, 0x14,
- 0x05, 0x50, 0x05, 0x05, 0x51, 0x04, 0x42, 0x0E, 0x40, 0x02, 0x88, 0x0A,
+ 0x05, 0x50, 0x05, 0x05, 0x51, 0x04, 0x42, 0x0E, 0x40, 0x02, 0x86, 0x0A,
0x42, 0x0E, 0x14, 0x44, 0x0E, 0x0C, 0x06, 0x50, 0x06, 0x51, 0x42, 0x0B,
0x0E, 0x40,
};
@@ -258,9 +254,9 @@ static constexpr uint8_t expected_cfi_kThumb2_adjust[] = {
// 0x00000006: .cfi_offset_extended: r81 at cfa-16
// 0x00000006: sub sp, sp, #44
// 0x00000008: .cfi_def_cfa_offset: 64
-// 0x00000008: str r0, [sp, #0]
-// 0x0000000a: cmp r0, #0
-// 0x0000000c: beq +128 (0x00000090)
+// 0x00000008: cmp r0, #0
+// 0x0000000a: beq +128 (0x0000008e)
+// 0x0000000c: ldr r0, [r0, #0]
// 0x0000000e: ldr r0, [r0, #0]
// 0x00000010: ldr r0, [r0, #0]
// 0x00000012: ldr r0, [r0, #0]
@@ -325,22 +321,21 @@ static constexpr uint8_t expected_cfi_kThumb2_adjust[] = {
// 0x00000088: ldr r0, [r0, #0]
// 0x0000008a: ldr r0, [r0, #0]
// 0x0000008c: ldr r0, [r0, #0]
-// 0x0000008e: ldr r0, [r0, #0]
-// 0x00000090: .cfi_remember_state
-// 0x00000090: add sp, sp, #44
-// 0x00000092: .cfi_def_cfa_offset: 20
-// 0x00000092: vpop.f32 {s16-s17}
-// 0x00000096: .cfi_def_cfa_offset: 12
-// 0x00000096: .cfi_restore_extended: r80
-// 0x00000096: .cfi_restore_extended: r81
-// 0x00000096: pop {r5, r6, pc}
-// 0x00000098: .cfi_restore_state
-// 0x00000098: .cfi_def_cfa_offset: 64
+// 0x0000008e: .cfi_remember_state
+// 0x0000008e: add sp, sp, #44
+// 0x00000090: .cfi_def_cfa_offset: 20
+// 0x00000090: vpop.f32 {s16-s17}
+// 0x00000094: .cfi_def_cfa_offset: 12
+// 0x00000094: .cfi_restore_extended: r80
+// 0x00000094: .cfi_restore_extended: r81
+// 0x00000094: pop {r5, r6, pc}
+// 0x00000096: .cfi_restore_state
+// 0x00000096: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kMips_adjust_head[] = {
0xC0, 0xFF, 0xBD, 0x27, 0x3C, 0x00, 0xBF, 0xAF, 0x38, 0x00, 0xB1, 0xAF,
0x34, 0x00, 0xB0, 0xAF, 0x28, 0x00, 0xB6, 0xF7, 0x20, 0x00, 0xB4, 0xF7,
- 0x00, 0x00, 0xA4, 0xAF, 0x08, 0x00, 0x04, 0x14, 0xFC, 0xFF, 0xBD, 0x27,
+ 0x08, 0x00, 0x04, 0x14, 0xFC, 0xFF, 0xBD, 0x27,
0x00, 0x00, 0xBF, 0xAF, 0x00, 0x00, 0x10, 0x04, 0x02, 0x00, 0x01, 0x3C,
0x18, 0x00, 0x21, 0x34, 0x21, 0x08, 0x3F, 0x00, 0x00, 0x00, 0xBF, 0x8F,
0x09, 0x00, 0x20, 0x00, 0x04, 0x00, 0xBD, 0x27,
@@ -352,7 +347,7 @@ static constexpr uint8_t expected_asm_kMips_adjust_tail[] = {
};
static constexpr uint8_t expected_cfi_kMips_adjust[] = {
0x44, 0x0E, 0x40, 0x44, 0x9F, 0x01, 0x44, 0x91, 0x02, 0x44, 0x90, 0x03,
- 0x54, 0x0E, 0x44, 0x60, 0x0E, 0x40, 0x04, 0x04, 0x00, 0x02, 0x00, 0x0A,
+ 0x50, 0x0E, 0x44, 0x60, 0x0E, 0x40, 0x04, 0x04, 0x00, 0x02, 0x00, 0x0A,
0x44, 0xDF, 0x44, 0xD1, 0x44, 0xD0, 0x50, 0x0E, 0x00, 0x0B, 0x0E, 0x40,
};
// 0x00000000: addiu r29, r29, -64
@@ -365,41 +360,40 @@ static constexpr uint8_t expected_cfi_kMips_adjust[] = {
// 0x00000010: .cfi_offset: r16 at cfa-12
// 0x00000010: sdc1 f22, +40(r29)
// 0x00000014: sdc1 f20, +32(r29)
-// 0x00000018: sw r4, +0(r29)
-// 0x0000001c: bne r0, r4, 0x00000040 ; +36
-// 0x00000020: addiu r29, r29, -4
-// 0x00000024: .cfi_def_cfa_offset: 68
-// 0x00000024: sw r31, +0(r29)
-// 0x00000028: bltzal r0, 0x0000002c ; +4
-// 0x0000002c: lui r1, 0x20000
-// 0x00000030: ori r1, r1, 24
-// 0x00000034: addu r1, r1, r31
-// 0x00000038: lw r31, +0(r29)
-// 0x0000003c: jr r1
-// 0x00000040: addiu r29, r29, 4
-// 0x00000044: .cfi_def_cfa_offset: 64
-// 0x00000044: nop
+// 0x00000018: bne r0, r4, 0x0000003c ; +36
+// 0x0000001c: addiu r29, r29, -4
+// 0x00000020: .cfi_def_cfa_offset: 68
+// 0x00000020: sw r31, +0(r29)
+// 0x00000024: bltzal r0, 0x00000028 ; +4
+// 0x00000028: lui r1, 0x20000
+// 0x0000002c: ori r1, r1, 24
+// 0x00000030: addu r1, r1, r31
+// 0x00000034: lw r31, +0(r29)
+// 0x00000038: jr r1
+// 0x0000003c: addiu r29, r29, 4
+// 0x00000040: .cfi_def_cfa_offset: 64
+// 0x00000040: nop
// ...
-// 0x00020044: nop
-// 0x00020048: .cfi_remember_state
-// 0x00020048: lw r31, +60(r29)
-// 0x0002004c: .cfi_restore: r31
-// 0x0002004c: lw r17, +56(r29)
-// 0x00020050: .cfi_restore: r17
-// 0x00020050: lw r16, +52(r29)
-// 0x00020054: .cfi_restore: r16
-// 0x00020054: ldc1 f22, +40(r29)
-// 0x00020058: ldc1 f20, +32(r29)
-// 0x0002005c: jr r31
-// 0x00020060: addiu r29, r29, 64
-// 0x00020064: .cfi_def_cfa_offset: 0
-// 0x00020064: .cfi_restore_state
-// 0x00020064: .cfi_def_cfa_offset: 64
+// 0x00020040: nop
+// 0x00020044: .cfi_remember_state
+// 0x00020044: lw r31, +60(r29)
+// 0x00020048: .cfi_restore: r31
+// 0x00020048: lw r17, +56(r29)
+// 0x0002004c: .cfi_restore: r17
+// 0x0002004c: lw r16, +52(r29)
+// 0x00020050: .cfi_restore: r16
+// 0x00020050: ldc1 f22, +40(r29)
+// 0x00020054: ldc1 f20, +32(r29)
+// 0x00020058: jr r31
+// 0x0002005c: addiu r29, r29, 64
+// 0x00020060: .cfi_def_cfa_offset: 0
+// 0x00020060: .cfi_restore_state
+// 0x00020060: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kMips64_adjust_head[] = {
0xD8, 0xFF, 0xBD, 0x67, 0x20, 0x00, 0xBF, 0xFF, 0x18, 0x00, 0xB1, 0xFF,
0x10, 0x00, 0xB0, 0xFF, 0x08, 0x00, 0xB9, 0xF7, 0x00, 0x00, 0xB8, 0xF7,
- 0xE8, 0xFF, 0xBD, 0x67, 0x00, 0x00, 0xA4, 0xFF, 0x02, 0x00, 0xA6, 0x60,
+ 0xE8, 0xFF, 0xBD, 0x67, 0x02, 0x00, 0xA6, 0x60,
0x02, 0x00, 0x3E, 0xEC, 0x0C, 0x00, 0x01, 0xD8,
};
static constexpr uint8_t expected_asm_kMips64_adjust_tail[] = {
@@ -409,7 +403,7 @@ static constexpr uint8_t expected_asm_kMips64_adjust_tail[] = {
};
static constexpr uint8_t expected_cfi_kMips64_adjust[] = {
0x44, 0x0E, 0x28, 0x44, 0x9F, 0x02, 0x44, 0x91, 0x04, 0x44, 0x90, 0x06,
- 0x44, 0xB9, 0x08, 0x44, 0xB8, 0x0A, 0x44, 0x0E, 0x40, 0x04, 0x14, 0x00,
+ 0x44, 0xB9, 0x08, 0x44, 0xB8, 0x0A, 0x44, 0x0E, 0x40, 0x04, 0x10, 0x00,
0x02, 0x00, 0x0A, 0x44, 0x0E, 0x28, 0x44, 0xF8, 0x44, 0xF9, 0x44, 0xD0,
0x44, 0xD1, 0x44, 0xDF, 0x44, 0x0E, 0x00, 0x48, 0x0B, 0x0E, 0x40,
};
@@ -427,29 +421,28 @@ static constexpr uint8_t expected_cfi_kMips64_adjust[] = {
// 0x00000018: .cfi_offset: r56 at cfa-40
// 0x00000018: daddiu r29, r29, -24
// 0x0000001c: .cfi_def_cfa_offset: 64
-// 0x0000001c: sd r4, +0(r29)
-// 0x00000020: bnec r5, r6, 0x0000002c ; +12
-// 0x00000024: auipc r1, 2
-// 0x00000028: jic r1, 12 ; b 0x00020030 ; +131080
-// 0x0000002c: nop
+// 0x0000001c: bnec r5, r6, 0x00000028 ; +12
+// 0x00000020: auipc r1, 2
+// 0x00000024: jic r1, 12 ; b 0x0002002c ; +131080
+// 0x00000028: nop
// ...
-// 0x0002002c: nop
-// 0x00020030: .cfi_remember_state
-// 0x00020030: daddiu r29, r29, 24
-// 0x00020034: .cfi_def_cfa_offset: 40
-// 0x00020034: ldc1 f24, +0(r29)
-// 0x00020038: .cfi_restore: r56
-// 0x00020038: ldc1 f25, +8(r29)
-// 0x0002003c: .cfi_restore: r57
-// 0x0002003c: ld r16, +16(r29)
-// 0x00020040: .cfi_restore: r16
-// 0x00020040: ld r17, +24(r29)
-// 0x00020044: .cfi_restore: r17
-// 0x00020044: ld r31, +32(r29)
-// 0x00020048: .cfi_restore: r31
-// 0x00020048: daddiu r29, r29, 40
-// 0x0002004c: .cfi_def_cfa_offset: 0
-// 0x0002004c: jr r31
-// 0x00020050: nop
-// 0x00020054: .cfi_restore_state
-// 0x00020054: .cfi_def_cfa_offset: 64
+// 0x00020028: nop
+// 0x0002002c: .cfi_remember_state
+// 0x0002002c: daddiu r29, r29, 24
+// 0x00020030: .cfi_def_cfa_offset: 40
+// 0x00020030: ldc1 f24, +0(r29)
+// 0x00020034: .cfi_restore: r56
+// 0x00020034: ldc1 f25, +8(r29)
+// 0x00020038: .cfi_restore: r57
+// 0x00020038: ld r16, +16(r29)
+// 0x0002003c: .cfi_restore: r16
+// 0x0002003c: ld r17, +24(r29)
+// 0x00020040: .cfi_restore: r17
+// 0x00020040: ld r31, +32(r29)
+// 0x00020044: .cfi_restore: r31
+// 0x00020044: daddiu r29, r29, 40
+// 0x00020048: .cfi_def_cfa_offset: 0
+// 0x00020048: jr r31
+// 0x0002004c: nop
+// 0x00020050: .cfi_restore_state
+// 0x00020050: .cfi_def_cfa_offset: 64