Implement CFI for JNI.

CFI (Call Frame Information) is necessary for stack unwinding in gdb, lldb, and libunwind.
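
The CFI data comes from a DebugFrameOpCodeWriterForAssembler that each Assembler
now owns (exposed as cfi()): every stack-pointer adjustment and callee-save
spill/restore in BuildFrame/RemoveFrame/IncreaseFrameSize/DecreaseFrameSize is
mirrored by a DWARF opcode. A rough sketch of the pattern, modeled on the x86
backend changes below (the register and offset choices are illustrative only):

    // Prologue: each push/adjust is paired with a CFI note.
    pushl(spill);                               // spill a callee-save register
    cfi_.AdjustCFAOffset(kFramePointerSize);    // CFA moves by one slot
    cfi_.RelOffset(DWARFReg(spill), 0);         // saved value is at current SP
    addl(ESP, Immediate(-adjust));              // reserve the rest of the frame
    cfi_.AdjustCFAOffset(adjust);

    // Epilogue: snapshot the state so code emitted after 'ret' (e.g. slow
    // paths) keeps the full-frame description.
    cfi_.RememberState();
    addl(ESP, Immediate(adjust));
    cfi_.AdjustCFAOffset(-adjust);
    popl(spill);
    cfi_.AdjustCFAOffset(-static_cast<int>(kFramePointerSize));
    cfi_.Restore(DWARFReg(spill));
    ret();
    cfi_.RestoreState();
    cfi_.DefCFAOffset(frame_size);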

Change-Id: I37eb7973f99a6975034cf0e699e138c3a9aba10f
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 10bb90b..62a11c2 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -193,6 +193,7 @@
   compiler/driver/compiler_driver_test.cc \
   compiler/elf_writer_test.cc \
   compiler/image_test.cc \
+  compiler/jni/jni_cfi_test.cc \
   compiler/jni/jni_compiler_test.cc \
   compiler/linker/arm64/relative_patcher_arm64_test.cc \
   compiler/linker/arm/relative_patcher_thumb2_test.cc \
diff --git a/compiler/jni/jni_cfi_test.cc b/compiler/jni/jni_cfi_test.cc
new file mode 100644
index 0000000..3a0d520
--- /dev/null
+++ b/compiler/jni/jni_cfi_test.cc
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <memory>
+#include <vector>
+
+#include "arch/instruction_set.h"
+#include "cfi_test.h"
+#include "gtest/gtest.h"
+#include "jni/quick/calling_convention.h"
+#include "utils/assembler.h"
+
+#include "jni/jni_cfi_test_expected.inc"
+
+namespace art {
+
+// Run the tests only on host.
+#ifndef HAVE_ANDROID_OS
+
+class JNICFITest : public CFITest {
+ public:
+  // Enable this flag to generate the expected outputs.
+  static constexpr bool kGenerateExpected = false;
+
+  void TestImpl(InstructionSet isa, const char* isa_str,
+                const std::vector<uint8_t>& expected_asm,
+                const std::vector<uint8_t>& expected_cfi) {
+    // Description of a simple method.
+    const bool is_static = true;
+    const bool is_synchronized = false;
+    const char* shorty = "IIFII";
+    std::unique_ptr<JniCallingConvention> jni_conv(
+        JniCallingConvention::Create(is_static, is_synchronized, shorty, isa));
+    std::unique_ptr<ManagedRuntimeCallingConvention> mr_conv(
+        ManagedRuntimeCallingConvention::Create(is_static, is_synchronized, shorty, isa));
+    const int frame_size(jni_conv->FrameSize());
+    const std::vector<ManagedRegister>& callee_save_regs = jni_conv->CalleeSaveRegisters();
+
+    // Assemble the method.
+    std::unique_ptr<Assembler> jni_asm(Assembler::Create(isa));
+    jni_asm->BuildFrame(frame_size, mr_conv->MethodRegister(),
+                        callee_save_regs, mr_conv->EntrySpills());
+    jni_asm->IncreaseFrameSize(32);
+    jni_asm->DecreaseFrameSize(32);
+    jni_asm->RemoveFrame(frame_size, callee_save_regs);
+    jni_asm->EmitSlowPaths();
+    std::vector<uint8_t> actual_asm(jni_asm->CodeSize());
+    MemoryRegion code(&actual_asm[0], actual_asm.size());
+    jni_asm->FinalizeInstructions(code);
+    ASSERT_EQ(jni_asm->cfi().GetCurrentCFAOffset(), frame_size);
+    const std::vector<uint8_t>& actual_cfi = *(jni_asm->cfi().data());
+
+    if (kGenerateExpected) {
+      GenerateExpected(stdout, isa, isa_str, actual_asm, actual_cfi);
+    } else {
+      EXPECT_EQ(expected_asm, actual_asm);
+      EXPECT_EQ(expected_cfi, actual_cfi);
+    }
+  }
+};
+
+#define TEST_ISA(isa) \
+  TEST_F(JNICFITest, isa) { \
+    std::vector<uint8_t> expected_asm(expected_asm_##isa, \
+        expected_asm_##isa + arraysize(expected_asm_##isa)); \
+    std::vector<uint8_t> expected_cfi(expected_cfi_##isa, \
+        expected_cfi_##isa + arraysize(expected_cfi_##isa)); \
+    TestImpl(isa, #isa, expected_asm, expected_cfi); \
+  }
+
+TEST_ISA(kThumb2)
+TEST_ISA(kArm64)
+TEST_ISA(kX86)
+TEST_ISA(kX86_64)
+TEST_ISA(kMips)
+TEST_ISA(kMips64)
+
+#endif  // HAVE_ANDROID_OS
+
+}  // namespace art
diff --git a/compiler/jni/jni_cfi_test_expected.inc b/compiler/jni/jni_cfi_test_expected.inc
new file mode 100644
index 0000000..47e6f10
--- /dev/null
+++ b/compiler/jni/jni_cfi_test_expected.inc
@@ -0,0 +1,505 @@
+static constexpr uint8_t expected_asm_kThumb2[] = {
+    0x2D, 0xE9, 0xE0, 0x4D, 0x2D, 0xED, 0x10, 0x8A, 0x89, 0xB0, 0x00, 0x90,
+    0xCD, 0xF8, 0x84, 0x10, 0x8D, 0xED, 0x22, 0x0A, 0xCD, 0xF8, 0x8C, 0x20,
+    0xCD, 0xF8, 0x90, 0x30, 0x88, 0xB0, 0x08, 0xB0, 0x09, 0xB0, 0xBD, 0xEC,
+    0x10, 0x8A, 0xBD, 0xE8, 0xE0, 0x8D,
+};
+static constexpr uint8_t expected_cfi_kThumb2[] = {
+    0x44, 0x0E, 0x1C, 0x85, 0x07, 0x86, 0x06, 0x87, 0x05, 0x88, 0x04, 0x8A,
+    0x03, 0x8B, 0x02, 0x8E, 0x01, 0x44, 0x0E, 0x5C, 0x05, 0x50, 0x17, 0x05,
+    0x51, 0x16, 0x05, 0x52, 0x15, 0x05, 0x53, 0x14, 0x05, 0x54, 0x13, 0x05,
+    0x55, 0x12, 0x05, 0x56, 0x11, 0x05, 0x57, 0x10, 0x05, 0x58, 0x0F, 0x05,
+    0x59, 0x0E, 0x05, 0x5A, 0x0D, 0x05, 0x5B, 0x0C, 0x05, 0x5C, 0x0B, 0x05,
+    0x5D, 0x0A, 0x05, 0x5E, 0x09, 0x05, 0x5F, 0x08, 0x42, 0x0E, 0x80, 0x01,
+    0x54, 0x0E, 0xA0, 0x01, 0x42, 0x0E, 0x80, 0x01, 0x0A, 0x42, 0x0E, 0x5C,
+    0x44, 0x0E, 0x1C, 0x06, 0x50, 0x06, 0x51, 0x06, 0x52, 0x06, 0x53, 0x06,
+    0x54, 0x06, 0x55, 0x06, 0x56, 0x06, 0x57, 0x06, 0x58, 0x06, 0x59, 0x06,
+    0x5A, 0x06, 0x5B, 0x06, 0x5C, 0x06, 0x5D, 0x06, 0x5E, 0x06, 0x5F, 0x44,
+    0x0B, 0x0E, 0x80, 0x01,
+};
+// 0x00000000: push {r5, r6, r7, r8, r10, r11, lr}
+// 0x00000004: .cfi_def_cfa_offset: 28
+// 0x00000004: .cfi_offset: r5 at cfa-28
+// 0x00000004: .cfi_offset: r6 at cfa-24
+// 0x00000004: .cfi_offset: r7 at cfa-20
+// 0x00000004: .cfi_offset: r8 at cfa-16
+// 0x00000004: .cfi_offset: r10 at cfa-12
+// 0x00000004: .cfi_offset: r11 at cfa-8
+// 0x00000004: .cfi_offset: r14 at cfa-4
+// 0x00000004: vpush.f32 {s16-s31}
+// 0x00000008: .cfi_def_cfa_offset: 92
+// 0x00000008: .cfi_offset_extended: r80 at cfa-92
+// 0x00000008: .cfi_offset_extended: r81 at cfa-88
+// 0x00000008: .cfi_offset_extended: r82 at cfa-84
+// 0x00000008: .cfi_offset_extended: r83 at cfa-80
+// 0x00000008: .cfi_offset_extended: r84 at cfa-76
+// 0x00000008: .cfi_offset_extended: r85 at cfa-72
+// 0x00000008: .cfi_offset_extended: r86 at cfa-68
+// 0x00000008: .cfi_offset_extended: r87 at cfa-64
+// 0x00000008: .cfi_offset_extended: r88 at cfa-60
+// 0x00000008: .cfi_offset_extended: r89 at cfa-56
+// 0x00000008: .cfi_offset_extended: r90 at cfa-52
+// 0x00000008: .cfi_offset_extended: r91 at cfa-48
+// 0x00000008: .cfi_offset_extended: r92 at cfa-44
+// 0x00000008: .cfi_offset_extended: r93 at cfa-40
+// 0x00000008: .cfi_offset_extended: r94 at cfa-36
+// 0x00000008: .cfi_offset_extended: r95 at cfa-32
+// 0x00000008: sub sp, sp, #36
+// 0x0000000a: .cfi_def_cfa_offset: 128
+// 0x0000000a: str r0, [sp, #0]
+// 0x0000000c: str.w r1, [sp, #132]
+// 0x00000010: vstr.f32 s0, [sp, #136]
+// 0x00000014: str.w r2, [sp, #140]
+// 0x00000018: str.w r3, [sp, #144]
+// 0x0000001c: sub sp, sp, #32
+// 0x0000001e: .cfi_def_cfa_offset: 160
+// 0x0000001e: add sp, sp, #32
+// 0x00000020: .cfi_def_cfa_offset: 128
+// 0x00000020: .cfi_remember_state
+// 0x00000020: add sp, sp, #36
+// 0x00000022: .cfi_def_cfa_offset: 92
+// 0x00000022: vpop.f32 {s16-s31}
+// 0x00000026: .cfi_def_cfa_offset: 28
+// 0x00000026: .cfi_restore_extended: r80
+// 0x00000026: .cfi_restore_extended: r81
+// 0x00000026: .cfi_restore_extended: r82
+// 0x00000026: .cfi_restore_extended: r83
+// 0x00000026: .cfi_restore_extended: r84
+// 0x00000026: .cfi_restore_extended: r85
+// 0x00000026: .cfi_restore_extended: r86
+// 0x00000026: .cfi_restore_extended: r87
+// 0x00000026: .cfi_restore_extended: r88
+// 0x00000026: .cfi_restore_extended: r89
+// 0x00000026: .cfi_restore_extended: r90
+// 0x00000026: .cfi_restore_extended: r91
+// 0x00000026: .cfi_restore_extended: r92
+// 0x00000026: .cfi_restore_extended: r93
+// 0x00000026: .cfi_restore_extended: r94
+// 0x00000026: .cfi_restore_extended: r95
+// 0x00000026: pop {r5, r6, r7, r8, r10, r11, pc}
+// 0x0000002a: .cfi_restore_state
+// 0x0000002a: .cfi_def_cfa_offset: 128
+
+static constexpr uint8_t expected_asm_kArm64[] = {
+    0xFF, 0x03, 0x03, 0xD1, 0xFE, 0x5F, 0x00, 0xF9, 0xFD, 0x5B, 0x00, 0xF9,
+    0xFC, 0x57, 0x00, 0xF9, 0xFB, 0x53, 0x00, 0xF9, 0xFA, 0x4F, 0x00, 0xF9,
+    0xF9, 0x4B, 0x00, 0xF9, 0xF8, 0x47, 0x00, 0xF9, 0xF7, 0x43, 0x00, 0xF9,
+    0xF6, 0x3F, 0x00, 0xF9, 0xF5, 0x3B, 0x00, 0xF9, 0xF4, 0x37, 0x00, 0xF9,
+    0xEF, 0x33, 0x00, 0xFD, 0xEE, 0x2F, 0x00, 0xFD, 0xED, 0x2B, 0x00, 0xFD,
+    0xEC, 0x27, 0x00, 0xFD, 0xEB, 0x23, 0x00, 0xFD, 0xEA, 0x1F, 0x00, 0xFD,
+    0xE9, 0x1B, 0x00, 0xFD, 0xE8, 0x17, 0x00, 0xFD, 0xF5, 0x03, 0x12, 0xAA,
+    0xE0, 0x03, 0x00, 0xB9, 0xE1, 0xC7, 0x00, 0xB9, 0xE0, 0xCB, 0x00, 0xBD,
+    0xE2, 0xCF, 0x00, 0xB9, 0xE3, 0xD3, 0x00, 0xB9, 0xFF, 0x83, 0x00, 0xD1,
+    0xFF, 0x83, 0x00, 0x91, 0xF2, 0x03, 0x15, 0xAA, 0xFE, 0x5F, 0x40, 0xF9,
+    0xFD, 0x5B, 0x40, 0xF9, 0xFC, 0x57, 0x40, 0xF9, 0xFB, 0x53, 0x40, 0xF9,
+    0xFA, 0x4F, 0x40, 0xF9, 0xF9, 0x4B, 0x40, 0xF9, 0xF8, 0x47, 0x40, 0xF9,
+    0xF7, 0x43, 0x40, 0xF9, 0xF6, 0x3F, 0x40, 0xF9, 0xF5, 0x3B, 0x40, 0xF9,
+    0xF4, 0x37, 0x40, 0xF9, 0xEF, 0x33, 0x40, 0xFD, 0xEE, 0x2F, 0x40, 0xFD,
+    0xED, 0x2B, 0x40, 0xFD, 0xEC, 0x27, 0x40, 0xFD, 0xEB, 0x23, 0x40, 0xFD,
+    0xEA, 0x1F, 0x40, 0xFD, 0xE9, 0x1B, 0x40, 0xFD, 0xE8, 0x17, 0x40, 0xFD,
+    0xFF, 0x03, 0x03, 0x91, 0xC0, 0x03, 0x5F, 0xD6,
+};
+static constexpr uint8_t expected_cfi_kArm64[] = {
+    0x44, 0x0E, 0xC0, 0x01, 0x44, 0x9E, 0x02, 0x44, 0x9D, 0x04, 0x44, 0x9C,
+    0x06, 0x44, 0x9B, 0x08, 0x44, 0x9A, 0x0A, 0x44, 0x99, 0x0C, 0x44, 0x98,
+    0x0E, 0x44, 0x97, 0x10, 0x44, 0x96, 0x12, 0x44, 0x95, 0x14, 0x44, 0x94,
+    0x16, 0x44, 0x05, 0x4F, 0x18, 0x44, 0x05, 0x4E, 0x1A, 0x44, 0x05, 0x4D,
+    0x1C, 0x44, 0x05, 0x4C, 0x1E, 0x44, 0x05, 0x4B, 0x20, 0x44, 0x05, 0x4A,
+    0x22, 0x44, 0x05, 0x49, 0x24, 0x44, 0x05, 0x48, 0x26, 0x5C, 0x0E, 0xE0,
+    0x01, 0x44, 0x0E, 0xC0, 0x01, 0x0A, 0x48, 0xDE, 0x44, 0xDD, 0x44, 0xDC,
+    0x44, 0xDB, 0x44, 0xDA, 0x44, 0xD9, 0x44, 0xD8, 0x44, 0xD7, 0x44, 0xD6,
+    0x44, 0xD5, 0x44, 0xD4, 0x44, 0x06, 0x4F, 0x44, 0x06, 0x4E, 0x44, 0x06,
+    0x4D, 0x44, 0x06, 0x4C, 0x44, 0x06, 0x4B, 0x44, 0x06, 0x4A, 0x44, 0x06,
+    0x49, 0x44, 0x06, 0x48, 0x44, 0x0E, 0x00, 0x44, 0x0B, 0x0E, 0xC0, 0x01,
+};
+// 0x00000000: sub sp, sp, #0xc0 (192)
+// 0x00000004: .cfi_def_cfa_offset: 192
+// 0x00000004: str lr, [sp, #184]
+// 0x00000008: .cfi_offset: r30 at cfa-8
+// 0x00000008: str x29, [sp, #176]
+// 0x0000000c: .cfi_offset: r29 at cfa-16
+// 0x0000000c: str x28, [sp, #168]
+// 0x00000010: .cfi_offset: r28 at cfa-24
+// 0x00000010: str x27, [sp, #160]
+// 0x00000014: .cfi_offset: r27 at cfa-32
+// 0x00000014: str x26, [sp, #152]
+// 0x00000018: .cfi_offset: r26 at cfa-40
+// 0x00000018: str x25, [sp, #144]
+// 0x0000001c: .cfi_offset: r25 at cfa-48
+// 0x0000001c: str x24, [sp, #136]
+// 0x00000020: .cfi_offset: r24 at cfa-56
+// 0x00000020: str x23, [sp, #128]
+// 0x00000024: .cfi_offset: r23 at cfa-64
+// 0x00000024: str x22, [sp, #120]
+// 0x00000028: .cfi_offset: r22 at cfa-72
+// 0x00000028: str x21, [sp, #112]
+// 0x0000002c: .cfi_offset: r21 at cfa-80
+// 0x0000002c: str x20, [sp, #104]
+// 0x00000030: .cfi_offset: r20 at cfa-88
+// 0x00000030: str d15, [sp, #96]
+// 0x00000034: .cfi_offset_extended: r79 at cfa-96
+// 0x00000034: str d14, [sp, #88]
+// 0x00000038: .cfi_offset_extended: r78 at cfa-104
+// 0x00000038: str d13, [sp, #80]
+// 0x0000003c: .cfi_offset_extended: r77 at cfa-112
+// 0x0000003c: str d12, [sp, #72]
+// 0x00000040: .cfi_offset_extended: r76 at cfa-120
+// 0x00000040: str d11, [sp, #64]
+// 0x00000044: .cfi_offset_extended: r75 at cfa-128
+// 0x00000044: str d10, [sp, #56]
+// 0x00000048: .cfi_offset_extended: r74 at cfa-136
+// 0x00000048: str d9, [sp, #48]
+// 0x0000004c: .cfi_offset_extended: r73 at cfa-144
+// 0x0000004c: str d8, [sp, #40]
+// 0x00000050: .cfi_offset_extended: r72 at cfa-152
+// 0x00000050: mov x21, tr
+// 0x00000054: str w0, [sp]
+// 0x00000058: str w1, [sp, #196]
+// 0x0000005c: str s0, [sp, #200]
+// 0x00000060: str w2, [sp, #204]
+// 0x00000064: str w3, [sp, #208]
+// 0x00000068: sub sp, sp, #0x20 (32)
+// 0x0000006c: .cfi_def_cfa_offset: 224
+// 0x0000006c: add sp, sp, #0x20 (32)
+// 0x00000070: .cfi_def_cfa_offset: 192
+// 0x00000070: .cfi_remember_state
+// 0x00000070: mov tr, x21
+// 0x00000074: ldr lr, [sp, #184]
+// 0x00000078: .cfi_restore: r30
+// 0x00000078: ldr x29, [sp, #176]
+// 0x0000007c: .cfi_restore: r29
+// 0x0000007c: ldr x28, [sp, #168]
+// 0x00000080: .cfi_restore: r28
+// 0x00000080: ldr x27, [sp, #160]
+// 0x00000084: .cfi_restore: r27
+// 0x00000084: ldr x26, [sp, #152]
+// 0x00000088: .cfi_restore: r26
+// 0x00000088: ldr x25, [sp, #144]
+// 0x0000008c: .cfi_restore: r25
+// 0x0000008c: ldr x24, [sp, #136]
+// 0x00000090: .cfi_restore: r24
+// 0x00000090: ldr x23, [sp, #128]
+// 0x00000094: .cfi_restore: r23
+// 0x00000094: ldr x22, [sp, #120]
+// 0x00000098: .cfi_restore: r22
+// 0x00000098: ldr x21, [sp, #112]
+// 0x0000009c: .cfi_restore: r21
+// 0x0000009c: ldr x20, [sp, #104]
+// 0x000000a0: .cfi_restore: r20
+// 0x000000a0: ldr d15, [sp, #96]
+// 0x000000a4: .cfi_restore_extended: r79
+// 0x000000a4: ldr d14, [sp, #88]
+// 0x000000a8: .cfi_restore_extended: r78
+// 0x000000a8: ldr d13, [sp, #80]
+// 0x000000ac: .cfi_restore_extended: r77
+// 0x000000ac: ldr d12, [sp, #72]
+// 0x000000b0: .cfi_restore_extended: r76
+// 0x000000b0: ldr d11, [sp, #64]
+// 0x000000b4: .cfi_restore_extended: r75
+// 0x000000b4: ldr d10, [sp, #56]
+// 0x000000b8: .cfi_restore_extended: r74
+// 0x000000b8: ldr d9, [sp, #48]
+// 0x000000bc: .cfi_restore_extended: r73
+// 0x000000bc: ldr d8, [sp, #40]
+// 0x000000c0: .cfi_restore_extended: r72
+// 0x000000c0: add sp, sp, #0xc0 (192)
+// 0x000000c4: .cfi_def_cfa_offset: 0
+// 0x000000c4: ret
+// 0x000000c8: .cfi_restore_state
+// 0x000000c8: .cfi_def_cfa_offset: 192
+
+static constexpr uint8_t expected_asm_kX86[] = {
+    0x57, 0x56, 0x55, 0x83, 0xC4, 0xE4, 0x50, 0x89, 0x4C, 0x24, 0x34, 0xF3,
+    0x0F, 0x11, 0x44, 0x24, 0x38, 0x89, 0x54, 0x24, 0x3C, 0x89, 0x5C, 0x24,
+    0x40, 0x83, 0xC4, 0xE0, 0x83, 0xC4, 0x20, 0x83, 0xC4, 0x20, 0x5D, 0x5E,
+    0x5F, 0xC3,
+};
+static constexpr uint8_t expected_cfi_kX86[] = {
+    0x41, 0x0E, 0x08, 0x87, 0x02, 0x41, 0x0E, 0x0C, 0x86, 0x03, 0x41, 0x0E,
+    0x10, 0x85, 0x04, 0x43, 0x0E, 0x2C, 0x41, 0x0E, 0x30, 0x55, 0x0E, 0x50,
+    0x43, 0x0E, 0x30, 0x0A, 0x43, 0x0E, 0x10, 0x41, 0x0E, 0x0C, 0xC5, 0x41,
+    0x0E, 0x08, 0xC6, 0x41, 0x0E, 0x04, 0xC7, 0x41, 0x0B, 0x0E, 0x30,
+};
+// 0x00000000: push edi
+// 0x00000001: .cfi_def_cfa_offset: 8
+// 0x00000001: .cfi_offset: r7 at cfa-8
+// 0x00000001: push esi
+// 0x00000002: .cfi_def_cfa_offset: 12
+// 0x00000002: .cfi_offset: r6 at cfa-12
+// 0x00000002: push ebp
+// 0x00000003: .cfi_def_cfa_offset: 16
+// 0x00000003: .cfi_offset: r5 at cfa-16
+// 0x00000003: add esp, -28
+// 0x00000006: .cfi_def_cfa_offset: 44
+// 0x00000006: push eax
+// 0x00000007: .cfi_def_cfa_offset: 48
+// 0x00000007: mov [esp + 52], ecx
+// 0x0000000b: movss [esp + 56], xmm0
+// 0x00000011: mov [esp + 60], edx
+// 0x00000015: mov [esp + 64], ebx
+// 0x00000019: add esp, -32
+// 0x0000001c: .cfi_def_cfa_offset: 80
+// 0x0000001c: add esp, 32
+// 0x0000001f: .cfi_def_cfa_offset: 48
+// 0x0000001f: .cfi_remember_state
+// 0x0000001f: add esp, 32
+// 0x00000022: .cfi_def_cfa_offset: 16
+// 0x00000022: pop ebp
+// 0x00000023: .cfi_def_cfa_offset: 12
+// 0x00000023: .cfi_restore: r5
+// 0x00000023: pop esi
+// 0x00000024: .cfi_def_cfa_offset: 8
+// 0x00000024: .cfi_restore: r6
+// 0x00000024: pop edi
+// 0x00000025: .cfi_def_cfa_offset: 4
+// 0x00000025: .cfi_restore: r7
+// 0x00000025: ret
+// 0x00000026: .cfi_restore_state
+// 0x00000026: .cfi_def_cfa_offset: 48
+
+static constexpr uint8_t expected_asm_kX86_64[] = {
+    0x41, 0x57, 0x41, 0x56, 0x41, 0x55, 0x41, 0x54, 0x55, 0x53, 0x48, 0x83,
+    0xEC, 0x48, 0xF2, 0x44, 0x0F, 0x11, 0x7C, 0x24, 0x40, 0xF2, 0x44, 0x0F,
+    0x11, 0x74, 0x24, 0x38, 0xF2, 0x44, 0x0F, 0x11, 0x6C, 0x24, 0x30, 0xF2,
+    0x44, 0x0F, 0x11, 0x64, 0x24, 0x28, 0x89, 0x3C, 0x24, 0x89, 0xB4, 0x24,
+    0x84, 0x00, 0x00, 0x00, 0xF3, 0x0F, 0x11, 0x84, 0x24, 0x88, 0x00, 0x00,
+    0x00, 0x89, 0x94, 0x24, 0x8C, 0x00, 0x00, 0x00, 0x89, 0x8C, 0x24, 0x90,
+    0x00, 0x00, 0x00, 0x48, 0x83, 0xC4, 0xE0, 0x48, 0x83, 0xC4, 0x20, 0xF2,
+    0x44, 0x0F, 0x10, 0x64, 0x24, 0x28, 0xF2, 0x44, 0x0F, 0x10, 0x6C, 0x24,
+    0x30, 0xF2, 0x44, 0x0F, 0x10, 0x74, 0x24, 0x38, 0xF2, 0x44, 0x0F, 0x10,
+    0x7C, 0x24, 0x40, 0x48, 0x83, 0xC4, 0x48, 0x5B, 0x5D, 0x41, 0x5C, 0x41,
+    0x5D, 0x41, 0x5E, 0x41, 0x5F, 0xC3,
+};
+static constexpr uint8_t expected_cfi_kX86_64[] = {
+    0x42, 0x0E, 0x10, 0x8F, 0x04, 0x42, 0x0E, 0x18, 0x8E, 0x06, 0x42, 0x0E,
+    0x20, 0x8D, 0x08, 0x42, 0x0E, 0x28, 0x8C, 0x0A, 0x41, 0x0E, 0x30, 0x86,
+    0x0C, 0x41, 0x0E, 0x38, 0x83, 0x0E, 0x44, 0x0E, 0x80, 0x01, 0x47, 0xA0,
+    0x10, 0x47, 0x9F, 0x12, 0x47, 0x9E, 0x14, 0x47, 0x9D, 0x16, 0x65, 0x0E,
+    0xA0, 0x01, 0x44, 0x0E, 0x80, 0x01, 0x0A, 0x47, 0xDD, 0x47, 0xDE, 0x47,
+    0xDF, 0x47, 0xE0, 0x44, 0x0E, 0x38, 0x41, 0x0E, 0x30, 0xC3, 0x41, 0x0E,
+    0x28, 0xC6, 0x42, 0x0E, 0x20, 0xCC, 0x42, 0x0E, 0x18, 0xCD, 0x42, 0x0E,
+    0x10, 0xCE, 0x42, 0x0E, 0x08, 0xCF, 0x41, 0x0B, 0x0E, 0x80, 0x01,
+};
+// 0x00000000: push r15
+// 0x00000002: .cfi_def_cfa_offset: 16
+// 0x00000002: .cfi_offset: r15 at cfa-16
+// 0x00000002: push r14
+// 0x00000004: .cfi_def_cfa_offset: 24
+// 0x00000004: .cfi_offset: r14 at cfa-24
+// 0x00000004: push r13
+// 0x00000006: .cfi_def_cfa_offset: 32
+// 0x00000006: .cfi_offset: r13 at cfa-32
+// 0x00000006: push r12
+// 0x00000008: .cfi_def_cfa_offset: 40
+// 0x00000008: .cfi_offset: r12 at cfa-40
+// 0x00000008: push rbp
+// 0x00000009: .cfi_def_cfa_offset: 48
+// 0x00000009: .cfi_offset: r6 at cfa-48
+// 0x00000009: push rbx
+// 0x0000000a: .cfi_def_cfa_offset: 56
+// 0x0000000a: .cfi_offset: r3 at cfa-56
+// 0x0000000a: subq rsp, 72
+// 0x0000000e: .cfi_def_cfa_offset: 128
+// 0x0000000e: movsd [rsp + 64], xmm15
+// 0x00000015: .cfi_offset: r32 at cfa-64
+// 0x00000015: movsd [rsp + 56], xmm14
+// 0x0000001c: .cfi_offset: r31 at cfa-72
+// 0x0000001c: movsd [rsp + 48], xmm13
+// 0x00000023: .cfi_offset: r30 at cfa-80
+// 0x00000023: movsd [rsp + 40], xmm12
+// 0x0000002a: .cfi_offset: r29 at cfa-88
+// 0x0000002a: mov [rsp], edi
+// 0x0000002d: mov [rsp + 132], esi
+// 0x00000034: movss [rsp + 136], xmm0
+// 0x0000003d: mov [rsp + 140], edx
+// 0x00000044: mov [rsp + 144], ecx
+// 0x0000004b: addq rsp, -32
+// 0x0000004f: .cfi_def_cfa_offset: 160
+// 0x0000004f: addq rsp, 32
+// 0x00000053: .cfi_def_cfa_offset: 128
+// 0x00000053: .cfi_remember_state
+// 0x00000053: movsd xmm12, [rsp + 40]
+// 0x0000005a: .cfi_restore: r29
+// 0x0000005a: movsd xmm13, [rsp + 48]
+// 0x00000061: .cfi_restore: r30
+// 0x00000061: movsd xmm14, [rsp + 56]
+// 0x00000068: .cfi_restore: r31
+// 0x00000068: movsd xmm15, [rsp + 64]
+// 0x0000006f: .cfi_restore: r32
+// 0x0000006f: addq rsp, 72
+// 0x00000073: .cfi_def_cfa_offset: 56
+// 0x00000073: pop rbx
+// 0x00000074: .cfi_def_cfa_offset: 48
+// 0x00000074: .cfi_restore: r3
+// 0x00000074: pop rbp
+// 0x00000075: .cfi_def_cfa_offset: 40
+// 0x00000075: .cfi_restore: r6
+// 0x00000075: pop r12
+// 0x00000077: .cfi_def_cfa_offset: 32
+// 0x00000077: .cfi_restore: r12
+// 0x00000077: pop r13
+// 0x00000079: .cfi_def_cfa_offset: 24
+// 0x00000079: .cfi_restore: r13
+// 0x00000079: pop r14
+// 0x0000007b: .cfi_def_cfa_offset: 16
+// 0x0000007b: .cfi_restore: r14
+// 0x0000007b: pop r15
+// 0x0000007d: .cfi_def_cfa_offset: 8
+// 0x0000007d: .cfi_restore: r15
+// 0x0000007d: ret
+// 0x0000007e: .cfi_restore_state
+// 0x0000007e: .cfi_def_cfa_offset: 128
+
+static constexpr uint8_t expected_asm_kMips[] = {
+    0xC0, 0xFF, 0xBD, 0x27, 0x3C, 0x00, 0xBF, 0xAF, 0x38, 0x00, 0xB8, 0xAF,
+    0x34, 0x00, 0xAF, 0xAF, 0x30, 0x00, 0xAE, 0xAF, 0x2C, 0x00, 0xAD, 0xAF,
+    0x28, 0x00, 0xAC, 0xAF, 0x24, 0x00, 0xAB, 0xAF, 0x20, 0x00, 0xAA, 0xAF,
+    0x1C, 0x00, 0xA9, 0xAF, 0x18, 0x00, 0xA8, 0xAF, 0x00, 0x00, 0xA4, 0xAF,
+    0x44, 0x00, 0xA5, 0xAF, 0x48, 0x00, 0xA6, 0xAF, 0x4C, 0x00, 0xA7, 0xAF,
+    0xE0, 0xFF, 0xBD, 0x27, 0x20, 0x00, 0xBD, 0x27, 0x18, 0x00, 0xA8, 0x8F,
+    0x1C, 0x00, 0xA9, 0x8F, 0x20, 0x00, 0xAA, 0x8F, 0x24, 0x00, 0xAB, 0x8F,
+    0x28, 0x00, 0xAC, 0x8F, 0x2C, 0x00, 0xAD, 0x8F, 0x30, 0x00, 0xAE, 0x8F,
+    0x34, 0x00, 0xAF, 0x8F, 0x38, 0x00, 0xB8, 0x8F, 0x3C, 0x00, 0xBF, 0x8F,
+    0x40, 0x00, 0xBD, 0x27, 0x09, 0x00, 0xE0, 0x03, 0x00, 0x00, 0x00, 0x00,
+};
+static constexpr uint8_t expected_cfi_kMips[] = {
+    0x44, 0x0E, 0x40, 0x44, 0x9F, 0x01, 0x44, 0x98, 0x02, 0x44, 0x8F, 0x03,
+    0x44, 0x8E, 0x04, 0x44, 0x8D, 0x05, 0x44, 0x8C, 0x06, 0x44, 0x8B, 0x07,
+    0x44, 0x8A, 0x08, 0x44, 0x89, 0x09, 0x44, 0x88, 0x0A, 0x54, 0x0E, 0x60,
+    0x44, 0x0E, 0x40, 0x0A, 0x44, 0xC8, 0x44, 0xC9, 0x44, 0xCA, 0x44, 0xCB,
+    0x44, 0xCC, 0x44, 0xCD, 0x44, 0xCE, 0x44, 0xCF, 0x44, 0xD8, 0x44, 0xDF,
+    0x44, 0x0E, 0x00, 0x48, 0x0B, 0x0E, 0x40,
+};
+// 0x00000000: addiu r29, r29, -64
+// 0x00000004: .cfi_def_cfa_offset: 64
+// 0x00000004: sw r31, +60(r29)
+// 0x00000008: .cfi_offset: r31 at cfa-4
+// 0x00000008: sw r24, +56(r29)
+// 0x0000000c: .cfi_offset: r24 at cfa-8
+// 0x0000000c: sw r15, +52(r29)
+// 0x00000010: .cfi_offset: r15 at cfa-12
+// 0x00000010: sw r14, +48(r29)
+// 0x00000014: .cfi_offset: r14 at cfa-16
+// 0x00000014: sw r13, +44(r29)
+// 0x00000018: .cfi_offset: r13 at cfa-20
+// 0x00000018: sw r12, +40(r29)
+// 0x0000001c: .cfi_offset: r12 at cfa-24
+// 0x0000001c: sw r11, +36(r29)
+// 0x00000020: .cfi_offset: r11 at cfa-28
+// 0x00000020: sw r10, +32(r29)
+// 0x00000024: .cfi_offset: r10 at cfa-32
+// 0x00000024: sw r9, +28(r29)
+// 0x00000028: .cfi_offset: r9 at cfa-36
+// 0x00000028: sw r8, +24(r29)
+// 0x0000002c: .cfi_offset: r8 at cfa-40
+// 0x0000002c: sw r4, +0(r29)
+// 0x00000030: sw r5, +68(r29)
+// 0x00000034: sw r6, +72(r29)
+// 0x00000038: sw r7, +76(r29)
+// 0x0000003c: addiu r29, r29, -32
+// 0x00000040: .cfi_def_cfa_offset: 96
+// 0x00000040: addiu r29, r29, 32
+// 0x00000044: .cfi_def_cfa_offset: 64
+// 0x00000044: .cfi_remember_state
+// 0x00000044: lw r8, +24(r29)
+// 0x00000048: .cfi_restore: r8
+// 0x00000048: lw r9, +28(r29)
+// 0x0000004c: .cfi_restore: r9
+// 0x0000004c: lw r10, +32(r29)
+// 0x00000050: .cfi_restore: r10
+// 0x00000050: lw r11, +36(r29)
+// 0x00000054: .cfi_restore: r11
+// 0x00000054: lw r12, +40(r29)
+// 0x00000058: .cfi_restore: r12
+// 0x00000058: lw r13, +44(r29)
+// 0x0000005c: .cfi_restore: r13
+// 0x0000005c: lw r14, +48(r29)
+// 0x00000060: .cfi_restore: r14
+// 0x00000060: lw r15, +52(r29)
+// 0x00000064: .cfi_restore: r15
+// 0x00000064: lw r24, +56(r29)
+// 0x00000068: .cfi_restore: r24
+// 0x00000068: lw r31, +60(r29)
+// 0x0000006c: .cfi_restore: r31
+// 0x0000006c: addiu r29, r29, 64
+// 0x00000070: .cfi_def_cfa_offset: 0
+// 0x00000070: jalr r0, r31
+// 0x00000074: nop
+// 0x00000078: .cfi_restore_state
+// 0x00000078: .cfi_def_cfa_offset: 64
+
+static constexpr uint8_t expected_asm_kMips64[] = {
+    0xA0, 0xFF, 0xBD, 0x67, 0x58, 0x00, 0xBF, 0xFF, 0x50, 0x00, 0xBE, 0xFF,
+    0x48, 0x00, 0xBC, 0xFF, 0x40, 0x00, 0xB7, 0xFF, 0x38, 0x00, 0xB6, 0xFF,
+    0x30, 0x00, 0xB5, 0xFF, 0x28, 0x00, 0xB4, 0xFF, 0x20, 0x00, 0xB3, 0xFF,
+    0x18, 0x00, 0xB2, 0xFF, 0x00, 0x00, 0xA4, 0xAF, 0x64, 0x00, 0xA5, 0xAF,
+    0x68, 0x00, 0xAE, 0xE7, 0x6C, 0x00, 0xA7, 0xAF, 0x70, 0x00, 0xA8, 0xAF,
+    0xE0, 0xFF, 0xBD, 0x67, 0x20, 0x00, 0xBD, 0x67, 0x18, 0x00, 0xB2, 0xDF,
+    0x20, 0x00, 0xB3, 0xDF, 0x28, 0x00, 0xB4, 0xDF, 0x30, 0x00, 0xB5, 0xDF,
+    0x38, 0x00, 0xB6, 0xDF, 0x40, 0x00, 0xB7, 0xDF, 0x48, 0x00, 0xBC, 0xDF,
+    0x50, 0x00, 0xBE, 0xDF, 0x58, 0x00, 0xBF, 0xDF, 0x60, 0x00, 0xBD, 0x67,
+    0x09, 0x00, 0xE0, 0x03, 0x00, 0x00, 0x00, 0x00,
+};
+static constexpr uint8_t expected_cfi_kMips64[] = {
+    0x44, 0x0E, 0x60, 0x44, 0x9F, 0x02, 0x44, 0x9E, 0x04, 0x44, 0x9C, 0x06,
+    0x44, 0x97, 0x08, 0x44, 0x96, 0x0A, 0x44, 0x95, 0x0C, 0x44, 0x94, 0x0E,
+    0x44, 0x93, 0x10, 0x44, 0x92, 0x12, 0x58, 0x0E, 0x80, 0x01, 0x44, 0x0E,
+    0x60, 0x0A, 0x44, 0xD2, 0x44, 0xD3, 0x44, 0xD4, 0x44, 0xD5, 0x44, 0xD6,
+    0x44, 0xD7, 0x44, 0xDC, 0x44, 0xDE, 0x44, 0xDF, 0x44, 0x0E, 0x00, 0x48,
+    0x0B, 0x0E, 0x60,
+};
+// 0x00000000: daddiu r29, r29, -96
+// 0x00000004: .cfi_def_cfa_offset: 96
+// 0x00000004: sd r31, +88(r29)
+// 0x00000008: .cfi_offset: r31 at cfa-8
+// 0x00000008: sd r30, +80(r29)
+// 0x0000000c: .cfi_offset: r30 at cfa-16
+// 0x0000000c: sd r28, +72(r29)
+// 0x00000010: .cfi_offset: r28 at cfa-24
+// 0x00000010: sd r23, +64(r29)
+// 0x00000014: .cfi_offset: r23 at cfa-32
+// 0x00000014: sd r22, +56(r29)
+// 0x00000018: .cfi_offset: r22 at cfa-40
+// 0x00000018: sd r21, +48(r29)
+// 0x0000001c: .cfi_offset: r21 at cfa-48
+// 0x0000001c: sd r20, +40(r29)
+// 0x00000020: .cfi_offset: r20 at cfa-56
+// 0x00000020: sd r19, +32(r29)
+// 0x00000024: .cfi_offset: r19 at cfa-64
+// 0x00000024: sd r18, +24(r29)
+// 0x00000028: .cfi_offset: r18 at cfa-72
+// 0x00000028: sw r4, +0(r29)
+// 0x0000002c: sw r5, +100(r29)
+// 0x00000030: swc1 f14, +104(r29)
+// 0x00000034: sw r7, +108(r29)
+// 0x00000038: sw r8, +112(r29)
+// 0x0000003c: daddiu r29, r29, -32
+// 0x00000040: .cfi_def_cfa_offset: 128
+// 0x00000040: daddiu r29, r29, 32
+// 0x00000044: .cfi_def_cfa_offset: 96
+// 0x00000044: .cfi_remember_state
+// 0x00000044: ld r18, +24(r29)
+// 0x00000048: .cfi_restore: r18
+// 0x00000048: ld r19, +32(r29)
+// 0x0000004c: .cfi_restore: r19
+// 0x0000004c: ld r20, +40(r29)
+// 0x00000050: .cfi_restore: r20
+// 0x00000050: ld r21, +48(r29)
+// 0x00000054: .cfi_restore: r21
+// 0x00000054: ld r22, +56(r29)
+// 0x00000058: .cfi_restore: r22
+// 0x00000058: ld r23, +64(r29)
+// 0x0000005c: .cfi_restore: r23
+// 0x0000005c: ld r28, +72(r29)
+// 0x00000060: .cfi_restore: r28
+// 0x00000060: ld r30, +80(r29)
+// 0x00000064: .cfi_restore: r30
+// 0x00000064: ld r31, +88(r29)
+// 0x00000068: .cfi_restore: r31
+// 0x00000068: daddiu r29, r29, 96
+// 0x0000006c: .cfi_def_cfa_offset: 0
+// 0x0000006c: jr r31
+// 0x00000070: nop
+// 0x00000074: .cfi_restore_state
+// 0x00000074: .cfi_def_cfa_offset: 96
+
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 45e2fd0..49b7cd1 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -104,6 +104,7 @@
   const size_t frame_size(main_jni_conv->FrameSize());
   const std::vector<ManagedRegister>& callee_save_regs = main_jni_conv->CalleeSaveRegisters();
   __ BuildFrame(frame_size, mr_conv->MethodRegister(), callee_save_regs, mr_conv->EntrySpills());
+  DCHECK_EQ(jni_asm->cfi().GetCurrentCFAOffset(), static_cast<int>(frame_size));
 
   // 2. Set up the HandleScope
   mr_conv->ResetIterator(FrameOffset(frame_size));
@@ -423,7 +424,9 @@
 
   // 16. Remove activation - need to restore callee save registers since the GC may have changed
   //     them.
+  DCHECK_EQ(jni_asm->cfi().GetCurrentCFAOffset(), static_cast<int>(frame_size));
   __ RemoveFrame(frame_size, callee_save_regs);
+  DCHECK_EQ(jni_asm->cfi().GetCurrentCFAOffset(), static_cast<int>(frame_size));
 
   // 17. Finalize code generation
   __ EmitSlowPaths();
@@ -438,7 +441,7 @@
                                                     frame_size,
                                                     main_jni_conv->CoreSpillMask(),
                                                     main_jni_conv->FpSpillMask(),
-                                                    ArrayRef<const uint8_t>());
+                                                    ArrayRef<const uint8_t>(*jni_asm->cfi().data()));
 }
 
 // Copy a single parameter from the managed to the JNI calling convention
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
index 8059289..c410660 100644
--- a/compiler/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -370,40 +370,46 @@
   }
 }
 
+static dwarf::Reg DWARFReg(Register reg) {
+  return dwarf::Reg::ArmCore(static_cast<int>(reg));
+}
+
+static dwarf::Reg DWARFReg(SRegister reg) {
+  return dwarf::Reg::ArmFp(static_cast<int>(reg));
+}
+
 constexpr size_t kFramePointerSize = 4;
 
 void ArmAssembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
                               const std::vector<ManagedRegister>& callee_save_regs,
                               const ManagedRegisterEntrySpills& entry_spills) {
+  CHECK_EQ(buffer_.Size(), 0U);  // Nothing emitted yet.
   CHECK_ALIGNED(frame_size, kStackAlignment);
   CHECK_EQ(R0, method_reg.AsArm().AsCoreRegister());
 
   // Push callee saves and link register.
-  RegList push_list = 1 << LR;
-  size_t pushed_values = 1;
-  int32_t min_s = kNumberOfSRegisters;
-  int32_t max_s = -1;
-  for (size_t i = 0; i < callee_save_regs.size(); i++) {
-    if (callee_save_regs.at(i).AsArm().IsCoreRegister()) {
-      Register reg = callee_save_regs.at(i).AsArm().AsCoreRegister();
-      push_list |= 1 << reg;
-      pushed_values++;
+  RegList core_spill_mask = 1 << LR;
+  uint32_t fp_spill_mask = 0;
+  for (const ManagedRegister& reg : callee_save_regs) {
+    if (reg.AsArm().IsCoreRegister()) {
+      core_spill_mask |= 1 << reg.AsArm().AsCoreRegister();
     } else {
-      CHECK(callee_save_regs.at(i).AsArm().IsSRegister());
-      min_s = std::min(static_cast<int>(callee_save_regs.at(i).AsArm().AsSRegister()), min_s);
-      max_s = std::max(static_cast<int>(callee_save_regs.at(i).AsArm().AsSRegister()), max_s);
+      fp_spill_mask |= 1 << reg.AsArm().AsSRegister();
     }
   }
-  PushList(push_list);
-  if (max_s != -1) {
-    pushed_values += 1 + max_s - min_s;
-    vpushs(static_cast<SRegister>(min_s), 1 + max_s - min_s);
+  PushList(core_spill_mask);
+  cfi_.AdjustCFAOffset(POPCOUNT(core_spill_mask) * kFramePointerSize);
+  cfi_.RelOffsetForMany(DWARFReg(Register(0)), 0, core_spill_mask, kFramePointerSize);
+  if (fp_spill_mask != 0) {
+    vpushs(SRegister(CTZ(fp_spill_mask)), POPCOUNT(fp_spill_mask));
+    cfi_.AdjustCFAOffset(POPCOUNT(fp_spill_mask) * kFramePointerSize);
+    cfi_.RelOffsetForMany(DWARFReg(SRegister(0)), 0, fp_spill_mask, kFramePointerSize);
   }
 
   // Increase frame to required size.
+  int pushed_values = POPCOUNT(core_spill_mask) + POPCOUNT(fp_spill_mask);
   CHECK_GT(frame_size, pushed_values * kFramePointerSize);  // Must at least have space for Method*.
-  size_t adjust = frame_size - (pushed_values * kFramePointerSize);
-  IncreaseFrameSize(adjust);
+  IncreaseFrameSize(frame_size - pushed_values * kFramePointerSize);  // handles CFI as well.
 
   // Write out Method*.
   StoreToOffset(kStoreWord, R0, SP, 0);
@@ -432,46 +438,46 @@
 void ArmAssembler::RemoveFrame(size_t frame_size,
                               const std::vector<ManagedRegister>& callee_save_regs) {
   CHECK_ALIGNED(frame_size, kStackAlignment);
+  cfi_.RememberState();
+
   // Compute callee saves to pop and PC.
-  RegList pop_list = 1 << PC;
-  size_t pop_values = 1;
-  int32_t min_s = kNumberOfSRegisters;
-  int32_t max_s = -1;
-  for (size_t i = 0; i < callee_save_regs.size(); i++) {
-    if (callee_save_regs.at(i).AsArm().IsCoreRegister()) {
-      Register reg = callee_save_regs.at(i).AsArm().AsCoreRegister();
-      pop_list |= 1 << reg;
-      pop_values++;
+  RegList core_spill_mask = 1 << PC;
+  uint32_t fp_spill_mask = 0;
+  for (const ManagedRegister& reg : callee_save_regs) {
+    if (reg.AsArm().IsCoreRegister()) {
+      core_spill_mask |= 1 << reg.AsArm().AsCoreRegister();
     } else {
-      CHECK(callee_save_regs.at(i).AsArm().IsSRegister());
-      min_s = std::min(static_cast<int>(callee_save_regs.at(i).AsArm().AsSRegister()), min_s);
-      max_s = std::max(static_cast<int>(callee_save_regs.at(i).AsArm().AsSRegister()), max_s);
+      fp_spill_mask |= 1 << reg.AsArm().AsSRegister();
     }
   }
 
-  if (max_s != -1) {
-    pop_values += 1 + max_s - min_s;
-  }
-
   // Decrease frame to start of callee saves.
+  int pop_values = POPCOUNT(core_spill_mask) + POPCOUNT(fp_spill_mask);
   CHECK_GT(frame_size, pop_values * kFramePointerSize);
-  size_t adjust = frame_size - (pop_values * kFramePointerSize);
-  DecreaseFrameSize(adjust);
+  DecreaseFrameSize(frame_size - (pop_values * kFramePointerSize));  // handles CFI as well.
 
-  if (max_s != -1) {
-    vpops(static_cast<SRegister>(min_s), 1 + max_s - min_s);
+  if (fp_spill_mask != 0) {
+    vpops(SRegister(CTZ(fp_spill_mask)), POPCOUNT(fp_spill_mask));
+    cfi_.AdjustCFAOffset(-kFramePointerSize * POPCOUNT(fp_spill_mask));
+    cfi_.RestoreMany(DWARFReg(SRegister(0)), fp_spill_mask);
   }
 
   // Pop callee saves and PC.
-  PopList(pop_list);
+  PopList(core_spill_mask);
+
+  // The CFI should be restored for any code that follows the exit block.
+  cfi_.RestoreState();
+  cfi_.DefCFAOffset(frame_size);
 }
 
 void ArmAssembler::IncreaseFrameSize(size_t adjust) {
   AddConstant(SP, -adjust);
+  cfi_.AdjustCFAOffset(adjust);
 }
 
 void ArmAssembler::DecreaseFrameSize(size_t adjust) {
   AddConstant(SP, adjust);
+  cfi_.AdjustCFAOffset(-adjust);
 }
 
 void ArmAssembler::Store(FrameOffset dest, ManagedRegister msrc, size_t size) {
diff --git a/compiler/utils/arm/managed_register_arm.h b/compiler/utils/arm/managed_register_arm.h
index a496c87..5fde9e8 100644
--- a/compiler/utils/arm/managed_register_arm.h
+++ b/compiler/utils/arm/managed_register_arm.h
@@ -19,6 +19,7 @@
 
 #include "base/logging.h"
 #include "constants_arm.h"
+#include "dwarf/register.h"
 #include "utils/managed_register.h"
 
 namespace art {
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index 58c7367..fbd0411 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -63,12 +63,14 @@
 void Arm64Assembler::IncreaseFrameSize(size_t adjust) {
   CHECK_ALIGNED(adjust, kStackAlignment);
   AddConstant(SP, -adjust);
+  cfi().AdjustCFAOffset(adjust);
 }
 
 // See Arm64 PCS Section 5.2.2.1.
 void Arm64Assembler::DecreaseFrameSize(size_t adjust) {
   CHECK_ALIGNED(adjust, kStackAlignment);
   AddConstant(SP, adjust);
+  cfi().AdjustCFAOffset(-adjust);
 }
 
 void Arm64Assembler::AddConstant(XRegister rd, int32_t value, Condition cond) {
@@ -638,6 +640,14 @@
   ___ Brk();
 }
 
+static dwarf::Reg DWARFReg(XRegister reg) {
+  return dwarf::Reg::Arm64Core(static_cast<int>(reg));
+}
+
+static dwarf::Reg DWARFReg(DRegister reg) {
+  return dwarf::Reg::Arm64Fp(static_cast<int>(reg));
+}
+
 constexpr size_t kFramePointerSize = 8;
 constexpr unsigned int kJniRefSpillRegsSize = 11 + 8;
 
@@ -660,45 +670,20 @@
   // TUNING: Use stp.
   // Note: Must match Arm64JniCallingConvention::CoreSpillMask().
   size_t reg_offset = frame_size;
-  reg_offset -= 8;
-  StoreToOffset(LR, SP, reg_offset);
-  reg_offset -= 8;
-  StoreToOffset(X29, SP, reg_offset);
-  reg_offset -= 8;
-  StoreToOffset(X28, SP, reg_offset);
-  reg_offset -= 8;
-  StoreToOffset(X27, SP, reg_offset);
-  reg_offset -= 8;
-  StoreToOffset(X26, SP, reg_offset);
-  reg_offset -= 8;
-  StoreToOffset(X25, SP, reg_offset);
-  reg_offset -= 8;
-  StoreToOffset(X24, SP, reg_offset);
-  reg_offset -= 8;
-  StoreToOffset(X23, SP, reg_offset);
-  reg_offset -= 8;
-  StoreToOffset(X22, SP, reg_offset);
-  reg_offset -= 8;
-  StoreToOffset(X21, SP, reg_offset);
-  reg_offset -= 8;
-  StoreToOffset(X20, SP, reg_offset);
-
-  reg_offset -= 8;
-  StoreDToOffset(D15, SP, reg_offset);
-  reg_offset -= 8;
-  StoreDToOffset(D14, SP, reg_offset);
-  reg_offset -= 8;
-  StoreDToOffset(D13, SP, reg_offset);
-  reg_offset -= 8;
-  StoreDToOffset(D12, SP, reg_offset);
-  reg_offset -= 8;
-  StoreDToOffset(D11, SP, reg_offset);
-  reg_offset -= 8;
-  StoreDToOffset(D10, SP, reg_offset);
-  reg_offset -= 8;
-  StoreDToOffset(D9, SP, reg_offset);
-  reg_offset -= 8;
-  StoreDToOffset(D8, SP, reg_offset);
+  static constexpr XRegister x_spills[] = {
+      LR, X29, X28, X27, X26, X25, X24, X23, X22, X21, X20 };
+  for (size_t i = 0; i < arraysize(x_spills); i++) {
+    XRegister reg = x_spills[i];
+    reg_offset -= 8;
+    StoreToOffset(reg, SP, reg_offset);
+    cfi_.RelOffset(DWARFReg(reg), reg_offset);
+  }
+  for (int d = 15; d >= 8; d--) {
+    DRegister reg = static_cast<DRegister>(d);
+    reg_offset -= 8;
+    StoreDToOffset(reg, SP, reg_offset);
+    cfi_.RelOffset(DWARFReg(reg), reg_offset);
+  }
 
   // Move TR(Caller saved) to ETR(Callee saved). The original (ETR)X21 has been saved on stack.
   // This way we make sure that TR is not trashed by native code.
@@ -734,6 +719,7 @@
 
 void Arm64Assembler::RemoveFrame(size_t frame_size, const std::vector<ManagedRegister>& callee_save_regs) {
   CHECK_ALIGNED(frame_size, kStackAlignment);
+  cfi_.RememberState();
 
   // For now we only check that the size of the frame is greater than the spill size.
   CHECK_EQ(callee_save_regs.size(), kJniRefSpillRegsSize);
@@ -748,51 +734,30 @@
   // TUNING: Use ldp.
   // Note: Must match Arm64JniCallingConvention::CoreSpillMask().
   size_t reg_offset = frame_size;
-  reg_offset -= 8;
-  LoadFromOffset(LR, SP, reg_offset);
-  reg_offset -= 8;
-  LoadFromOffset(X29, SP, reg_offset);
-  reg_offset -= 8;
-  LoadFromOffset(X28, SP, reg_offset);
-  reg_offset -= 8;
-  LoadFromOffset(X27, SP, reg_offset);
-  reg_offset -= 8;
-  LoadFromOffset(X26, SP, reg_offset);
-  reg_offset -= 8;
-  LoadFromOffset(X25, SP, reg_offset);
-  reg_offset -= 8;
-  LoadFromOffset(X24, SP, reg_offset);
-  reg_offset -= 8;
-  LoadFromOffset(X23, SP, reg_offset);
-  reg_offset -= 8;
-  LoadFromOffset(X22, SP, reg_offset);
-  reg_offset -= 8;
-  LoadFromOffset(X21, SP, reg_offset);
-  reg_offset -= 8;
-  LoadFromOffset(X20, SP, reg_offset);
-
-  reg_offset -= 8;
-  LoadDFromOffset(D15, SP, reg_offset);
-  reg_offset -= 8;
-  LoadDFromOffset(D14, SP, reg_offset);
-  reg_offset -= 8;
-  LoadDFromOffset(D13, SP, reg_offset);
-  reg_offset -= 8;
-  LoadDFromOffset(D12, SP, reg_offset);
-  reg_offset -= 8;
-  LoadDFromOffset(D11, SP, reg_offset);
-  reg_offset -= 8;
-  LoadDFromOffset(D10, SP, reg_offset);
-  reg_offset -= 8;
-  LoadDFromOffset(D9, SP, reg_offset);
-  reg_offset -= 8;
-  LoadDFromOffset(D8, SP, reg_offset);
+  static constexpr XRegister x_spills[] = {
+      LR, X29, X28, X27, X26, X25, X24, X23, X22, X21, X20 };
+  for (size_t i = 0; i < arraysize(x_spills); i++) {
+    XRegister reg = x_spills[i];
+    reg_offset -= 8;
+    LoadFromOffset(reg, SP, reg_offset);
+    cfi_.Restore(DWARFReg(reg));
+  }
+  for (int d = 15; d >= 8; d--) {
+    DRegister reg = static_cast<DRegister>(d);
+    reg_offset -= 8;
+    LoadDFromOffset(reg, SP, reg_offset);
+    cfi_.Restore(DWARFReg(reg));
+  }
 
   // Decrease frame size to start of callee saved regs.
   DecreaseFrameSize(frame_size);
 
   // Pop callee saved and return to LR.
   ___ Ret();
+
+  // The CFI should be restored for any code that follows the exit block.
+  cfi_.RestoreState();
+  cfi_.DefCFAOffset(frame_size);
 }
 
 }  // namespace arm64
diff --git a/compiler/utils/arm64/managed_register_arm64.h b/compiler/utils/arm64/managed_register_arm64.h
index e1d6f31..62c1d4d 100644
--- a/compiler/utils/arm64/managed_register_arm64.h
+++ b/compiler/utils/arm64/managed_register_arm64.h
@@ -19,6 +19,7 @@
 
 #include "base/logging.h"
 #include "constants_arm64.h"
+#include "dwarf/register.h"
 #include "utils/managed_register.h"
 
 namespace art {
diff --git a/compiler/utils/assembler.cc b/compiler/utils/assembler.cc
index 5340dd3..36342c6 100644
--- a/compiler/utils/assembler.cc
+++ b/compiler/utils/assembler.cc
@@ -105,6 +105,9 @@
   CHECK_EQ(Size(), old_size);
 }
 
+void DebugFrameOpCodeWriterForAssembler::ImplicitlyAdvancePC() {
+  this->AdvancePC(assembler_->CodeSize());
+}
 
 Assembler* Assembler::Create(InstructionSet instruction_set) {
   switch (instruction_set) {
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index 323f93c..ebafd3d 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -29,6 +29,7 @@
 #include "offsets.h"
 #include "x86/constants_x86.h"
 #include "x86_64/constants_x86_64.h"
+#include "dwarf/debug_frame_opcode_writer.h"
 
 namespace art {
 
@@ -354,6 +355,23 @@
   friend class AssemblerFixup;
 };
 
+// The purpose of this class is to ensure that we do not have to explicitly
+// call the AdvancePC method (which is good for convenience and correctness).
+class DebugFrameOpCodeWriterForAssembler FINAL
+    : public dwarf::DebugFrameOpCodeWriter<> {
+ public:
+  // This method is called by the opcode writers.
+  virtual void ImplicitlyAdvancePC() FINAL;
+
+  explicit DebugFrameOpCodeWriterForAssembler(Assembler* buffer)
+      : dwarf::DebugFrameOpCodeWriter<>(),
+        assembler_(buffer) {
+  }
+
+ private:
+  Assembler* assembler_;
+};
+
 class Assembler {
  public:
   static Assembler* Create(InstructionSet instruction_set);
@@ -506,10 +524,18 @@
 
   virtual ~Assembler() {}
 
+  /**
+   * @brief Buffer of DWARF's Call Frame Information opcodes.
+   * @details It is used by debuggers and other tools to unwind the call stack.
+   */
+  DebugFrameOpCodeWriterForAssembler& cfi() { return cfi_; }
+
  protected:
-  Assembler() : buffer_() {}
+  Assembler() : buffer_(), cfi_(this) {}
 
   AssemblerBuffer buffer_;
+
+  DebugFrameOpCodeWriterForAssembler cfi_;
 };
 
 }  // namespace art
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index b5437b0..709a911 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -536,6 +536,10 @@
   Sdc1(reg, base, offset);
 }
 
+static dwarf::Reg DWARFReg(Register reg) {
+  return dwarf::Reg::MipsCore(static_cast<int>(reg));
+}
+
 constexpr size_t kFramePointerSize = 4;
 
 void MipsAssembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
@@ -549,10 +553,12 @@
   // Push callee saves and return address
   int stack_offset = frame_size - kFramePointerSize;
   StoreToOffset(kStoreWord, RA, SP, stack_offset);
+  cfi_.RelOffset(DWARFReg(RA), stack_offset);
   for (int i = callee_save_regs.size() - 1; i >= 0; --i) {
     stack_offset -= kFramePointerSize;
     Register reg = callee_save_regs.at(i).AsMips().AsCoreRegister();
     StoreToOffset(kStoreWord, reg, SP, stack_offset);
+    cfi_.RelOffset(DWARFReg(reg), stack_offset);
   }
 
   // Write out Method*.
@@ -568,31 +574,40 @@
 void MipsAssembler::RemoveFrame(size_t frame_size,
                                 const std::vector<ManagedRegister>& callee_save_regs) {
   CHECK_ALIGNED(frame_size, kStackAlignment);
+  cfi_.RememberState();
 
   // Pop callee saves and return address
   int stack_offset = frame_size - (callee_save_regs.size() * kFramePointerSize) - kFramePointerSize;
   for (size_t i = 0; i < callee_save_regs.size(); ++i) {
     Register reg = callee_save_regs.at(i).AsMips().AsCoreRegister();
     LoadFromOffset(kLoadWord, reg, SP, stack_offset);
+    cfi_.Restore(DWARFReg(reg));
     stack_offset += kFramePointerSize;
   }
   LoadFromOffset(kLoadWord, RA, SP, stack_offset);
+  cfi_.Restore(DWARFReg(RA));
 
   // Decrease frame to required size.
   DecreaseFrameSize(frame_size);
 
   // Then jump to the return address.
   Jr(RA);
+
+  // The CFI should be restored for any code that follows the exit block.
+  cfi_.RestoreState();
+  cfi_.DefCFAOffset(frame_size);
 }
 
 void MipsAssembler::IncreaseFrameSize(size_t adjust) {
   CHECK_ALIGNED(adjust, kStackAlignment);
   AddConstant(SP, SP, -adjust);
+  cfi_.AdjustCFAOffset(adjust);
 }
 
 void MipsAssembler::DecreaseFrameSize(size_t adjust) {
   CHECK_ALIGNED(adjust, kStackAlignment);
   AddConstant(SP, SP, adjust);
+  cfi_.AdjustCFAOffset(-adjust);
 }
 
 void MipsAssembler::Store(FrameOffset dest, ManagedRegister msrc, size_t size) {
diff --git a/compiler/utils/mips/managed_register_mips.h b/compiler/utils/mips/managed_register_mips.h
index dd55cc4..40d39e3 100644
--- a/compiler/utils/mips/managed_register_mips.h
+++ b/compiler/utils/mips/managed_register_mips.h
@@ -18,6 +18,7 @@
 #define ART_COMPILER_UTILS_MIPS_MANAGED_REGISTER_MIPS_H_
 
 #include "constants_mips.h"
+#include "dwarf/register.h"
 #include "utils/managed_register.h"
 
 namespace art {
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index 388d274..282ab96 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -568,6 +568,10 @@
   }
 }
 
+static dwarf::Reg DWARFReg(GpuRegister reg) {
+  return dwarf::Reg::Mips64Core(static_cast<int>(reg));
+}
+
 constexpr size_t kFramePointerSize = 8;
 
 void Mips64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
@@ -581,10 +585,12 @@
   // Push callee saves and return address
   int stack_offset = frame_size - kFramePointerSize;
   StoreToOffset(kStoreDoubleword, RA, SP, stack_offset);
+  cfi_.RelOffset(DWARFReg(RA), stack_offset);
   for (int i = callee_save_regs.size() - 1; i >= 0; --i) {
     stack_offset -= kFramePointerSize;
     GpuRegister reg = callee_save_regs.at(i).AsMips64().AsGpuRegister();
     StoreToOffset(kStoreDoubleword, reg, SP, stack_offset);
+    cfi_.RelOffset(DWARFReg(reg), stack_offset);
   }
 
   // Write out Method*.
@@ -612,31 +618,40 @@
 void Mips64Assembler::RemoveFrame(size_t frame_size,
                                   const std::vector<ManagedRegister>& callee_save_regs) {
   CHECK_ALIGNED(frame_size, kStackAlignment);
+  cfi_.RememberState();
 
   // Pop callee saves and return address
   int stack_offset = frame_size - (callee_save_regs.size() * kFramePointerSize) - kFramePointerSize;
   for (size_t i = 0; i < callee_save_regs.size(); ++i) {
     GpuRegister reg = callee_save_regs.at(i).AsMips64().AsGpuRegister();
     LoadFromOffset(kLoadDoubleword, reg, SP, stack_offset);
+    cfi_.Restore(DWARFReg(reg));
     stack_offset += kFramePointerSize;
   }
   LoadFromOffset(kLoadDoubleword, RA, SP, stack_offset);
+  cfi_.Restore(DWARFReg(RA));
 
   // Decrease frame to required size.
   DecreaseFrameSize(frame_size);
 
   // Then jump to the return address.
   Jr(RA);
+
+  // The CFI should be restored for any code that follows the exit block.
+  cfi_.RestoreState();
+  cfi_.DefCFAOffset(frame_size);
 }
 
 void Mips64Assembler::IncreaseFrameSize(size_t adjust) {
   CHECK_ALIGNED(adjust, kStackAlignment);
   AddConstant64(SP, SP, -adjust);
+  cfi_.AdjustCFAOffset(adjust);
 }
 
 void Mips64Assembler::DecreaseFrameSize(size_t adjust) {
   CHECK_ALIGNED(adjust, kStackAlignment);
   AddConstant64(SP, SP, adjust);
+  cfi_.AdjustCFAOffset(-adjust);
 }
 
 void Mips64Assembler::Store(FrameOffset dest, ManagedRegister msrc, size_t size) {
diff --git a/compiler/utils/mips64/managed_register_mips64.h b/compiler/utils/mips64/managed_register_mips64.h
index 924a928..4c4705b 100644
--- a/compiler/utils/mips64/managed_register_mips64.h
+++ b/compiler/utils/mips64/managed_register_mips64.h
@@ -18,6 +18,7 @@
 #define ART_COMPILER_UTILS_MIPS64_MANAGED_REGISTER_MIPS64_H_
 
 #include "constants_mips64.h"
+#include "dwarf/register.h"
 #include "utils/managed_register.h"
 
 namespace art {
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 4b71412..8ce9375 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -1630,18 +1630,25 @@
   EmitOperand(reg_or_opcode, Operand(operand));
 }
 
+static dwarf::Reg DWARFReg(Register reg) {
+  return dwarf::Reg::X86Core(static_cast<int>(reg));
+}
+
 constexpr size_t kFramePointerSize = 4;
 
 void X86Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
                               const std::vector<ManagedRegister>& spill_regs,
                               const ManagedRegisterEntrySpills& entry_spills) {
   DCHECK_EQ(buffer_.Size(), 0U);  // Nothing emitted yet.
+  cfi_.SetCurrentCFAOffset(4);  // Return address on stack.
   CHECK_ALIGNED(frame_size, kStackAlignment);
   int gpr_count = 0;
   for (int i = spill_regs.size() - 1; i >= 0; --i) {
     Register spill = spill_regs.at(i).AsX86().AsCpuRegister();
     pushl(spill);
     gpr_count++;
+    cfi_.AdjustCFAOffset(kFramePointerSize);
+    cfi_.RelOffset(DWARFReg(spill), 0);
   }
 
   // return address then method on stack.
@@ -1649,7 +1656,10 @@
                    sizeof(StackReference<mirror::ArtMethod>) /*method*/ -
                    kFramePointerSize /*return address*/;
   addl(ESP, Immediate(-adjust));
+  cfi_.AdjustCFAOffset(adjust);
   pushl(method_reg.AsX86().AsCpuRegister());
+  cfi_.AdjustCFAOffset(kFramePointerSize);
+  DCHECK_EQ(static_cast<size_t>(cfi_.GetCurrentCFAOffset()), frame_size);
 
   for (size_t i = 0; i < entry_spills.size(); ++i) {
     ManagedRegisterSpill spill = entry_spills.at(i);
@@ -1671,25 +1681,33 @@
 void X86Assembler::RemoveFrame(size_t frame_size,
                             const std::vector<ManagedRegister>& spill_regs) {
   CHECK_ALIGNED(frame_size, kStackAlignment);
+  cfi_.RememberState();
   int adjust = frame_size - (spill_regs.size() * kFramePointerSize) -
                sizeof(StackReference<mirror::ArtMethod>);
   addl(ESP, Immediate(adjust));
+  cfi_.AdjustCFAOffset(-adjust);
   for (size_t i = 0; i < spill_regs.size(); ++i) {
-    x86::X86ManagedRegister spill = spill_regs.at(i).AsX86();
-    DCHECK(spill.IsCpuRegister());
-    popl(spill.AsCpuRegister());
+    Register spill = spill_regs.at(i).AsX86().AsCpuRegister();
+    popl(spill);
+    cfi_.AdjustCFAOffset(-static_cast<int>(kFramePointerSize));
+    cfi_.Restore(DWARFReg(spill));
   }
   ret();
+  // The CFI should be restored for any code that follows the exit block.
+  cfi_.RestoreState();
+  cfi_.DefCFAOffset(frame_size);
 }
 
 void X86Assembler::IncreaseFrameSize(size_t adjust) {
   CHECK_ALIGNED(adjust, kStackAlignment);
   addl(ESP, Immediate(-adjust));
+  cfi_.AdjustCFAOffset(adjust);
 }
 
 void X86Assembler::DecreaseFrameSize(size_t adjust) {
   CHECK_ALIGNED(adjust, kStackAlignment);
   addl(ESP, Immediate(adjust));
+  cfi_.AdjustCFAOffset(-adjust);
 }
 
 void X86Assembler::Store(FrameOffset offs, ManagedRegister msrc, size_t size) {
diff --git a/compiler/utils/x86/managed_register_x86.h b/compiler/utils/x86/managed_register_x86.h
index 09d2b49..4e8c41e 100644
--- a/compiler/utils/x86/managed_register_x86.h
+++ b/compiler/utils/x86/managed_register_x86.h
@@ -18,6 +18,7 @@
 #define ART_COMPILER_UTILS_X86_MANAGED_REGISTER_X86_H_
 
 #include "constants_x86.h"
+#include "dwarf/register.h"
 #include "utils/managed_register.h"
 
 namespace art {
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 25d24fb..780c1b4 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -2167,12 +2167,20 @@
   }
 }
 
+static dwarf::Reg DWARFReg(Register reg) {
+  return dwarf::Reg::X86_64Core(static_cast<int>(reg));
+}
+static dwarf::Reg DWARFReg(FloatRegister reg) {
+  return dwarf::Reg::X86_64Fp(static_cast<int>(reg));
+}
+
 constexpr size_t kFramePointerSize = 8;
 
 void X86_64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
                                  const std::vector<ManagedRegister>& spill_regs,
                                  const ManagedRegisterEntrySpills& entry_spills) {
   DCHECK_EQ(buffer_.Size(), 0U);  // Nothing emitted yet.
+  cfi_.SetCurrentCFAOffset(8);  // Return address on stack.
   CHECK_ALIGNED(frame_size, kStackAlignment);
   int gpr_count = 0;
   for (int i = spill_regs.size() - 1; i >= 0; --i) {
@@ -2180,6 +2188,8 @@
     if (spill.IsCpuRegister()) {
       pushq(spill.AsCpuRegister());
       gpr_count++;
+      cfi_.AdjustCFAOffset(kFramePointerSize);
+      cfi_.RelOffset(DWARFReg(spill.AsCpuRegister().AsRegister()), 0);
     }
   }
   // return address then method on stack.
@@ -2187,6 +2197,7 @@
                           - (gpr_count * kFramePointerSize)
                           - kFramePointerSize /*return address*/;
   subq(CpuRegister(RSP), Immediate(rest_of_frame));
+  cfi_.AdjustCFAOffset(rest_of_frame);
 
   // spill xmms
   int64_t offset = rest_of_frame;
@@ -2195,6 +2206,7 @@
     if (spill.IsXmmRegister()) {
       offset -= sizeof(double);
       movsd(Address(CpuRegister(RSP), offset), spill.AsXmmRegister());
+      cfi_.RelOffset(DWARFReg(spill.AsXmmRegister().AsFloatRegister()), offset);
     }
   }
 
@@ -2226,6 +2238,7 @@
 void X86_64Assembler::RemoveFrame(size_t frame_size,
                             const std::vector<ManagedRegister>& spill_regs) {
   CHECK_ALIGNED(frame_size, kStackAlignment);
+  cfi_.RememberState();
   int gpr_count = 0;
   // unspill xmms
   int64_t offset = static_cast<int64_t>(frame_size) - (spill_regs.size() * kFramePointerSize) - 2 * kFramePointerSize;
@@ -2234,28 +2247,38 @@
     if (spill.IsXmmRegister()) {
       offset += sizeof(double);
       movsd(spill.AsXmmRegister(), Address(CpuRegister(RSP), offset));
+      cfi_.Restore(DWARFReg(spill.AsXmmRegister().AsFloatRegister()));
     } else {
       gpr_count++;
     }
   }
-  addq(CpuRegister(RSP), Immediate(static_cast<int64_t>(frame_size) - (gpr_count * kFramePointerSize) - kFramePointerSize));
+  int adjust = static_cast<int>(frame_size) - (gpr_count * kFramePointerSize) - kFramePointerSize;
+  addq(CpuRegister(RSP), Immediate(adjust));
+  cfi_.AdjustCFAOffset(-adjust);
   for (size_t i = 0; i < spill_regs.size(); ++i) {
     x86_64::X86_64ManagedRegister spill = spill_regs.at(i).AsX86_64();
     if (spill.IsCpuRegister()) {
       popq(spill.AsCpuRegister());
+      cfi_.AdjustCFAOffset(-static_cast<int>(kFramePointerSize));
+      cfi_.Restore(DWARFReg(spill.AsCpuRegister().AsRegister()));
     }
   }
   ret();
+  // The CFI should be restored for any code that follows the exit block.
+  cfi_.RestoreState();
+  cfi_.DefCFAOffset(frame_size);
 }
 
 void X86_64Assembler::IncreaseFrameSize(size_t adjust) {
   CHECK_ALIGNED(adjust, kStackAlignment);
   addq(CpuRegister(RSP), Immediate(-static_cast<int64_t>(adjust)));
+  cfi_.AdjustCFAOffset(adjust);
 }
 
 void X86_64Assembler::DecreaseFrameSize(size_t adjust) {
   CHECK_ALIGNED(adjust, kStackAlignment);
   addq(CpuRegister(RSP), Immediate(adjust));
+  cfi_.AdjustCFAOffset(-adjust);
 }
 
 void X86_64Assembler::Store(FrameOffset offs, ManagedRegister msrc, size_t size) {
diff --git a/compiler/utils/x86_64/managed_register_x86_64.h b/compiler/utils/x86_64/managed_register_x86_64.h
index 822659f..47bbb44 100644
--- a/compiler/utils/x86_64/managed_register_x86_64.h
+++ b/compiler/utils/x86_64/managed_register_x86_64.h
@@ -18,6 +18,7 @@
 #define ART_COMPILER_UTILS_X86_64_MANAGED_REGISTER_X86_64_H_
 
 #include "constants_x86_64.h"
+#include "dwarf/register.h"
 #include "utils/managed_register.h"
 
 namespace art {