Clean up OatQuickMethodHeader after Quick removal.
This reduces the size of the per-method pre-header by 8 bytes,
shrinking both the oat file and the mmapped .text section. The
memory dex2oat needs to store a CompiledMethod also shrinks, by
8 bytes for 32-bit dex2oat and 16 bytes for 64-bit dex2oat. The
aosp_flounder-userdebug 32-bit and 64-bit boot.oat files are
each about 1.1 MiB smaller.
Disable the broken StubTest.IMT test, b/27991555.
Change-Id: I05fe45c28c8ffb7a0fa8b1117b969786748b1039
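
For context, a minimal sketch of where the 8 bytes per method come
from, assuming the pre-header dropped two Quick-only 4-byte offset
fields (a mapping table offset and a native GC map offset, in line
with the GetNativeGcMap()/VmapTable removals in the diff below). The
struct and field names here are illustrative stand-ins, not the exact
ART declarations:

    // Sketch only: hypothetical layouts, not the real ART headers.
    #include <cstdint>

    struct QuickFrameInfoSketch {       // stand-in for QuickMethodFrameInfo
      uint32_t frame_size_in_bytes_;
      uint32_t core_spill_mask_;
      uint32_t fp_spill_mask_;
    };

    struct PreHeaderBefore {            // hypothetical pre-cleanup layout
      uint32_t mapping_table_offset_;   // Quick-only metadata, dropped
      uint32_t gc_map_offset_;          // Quick-only metadata, dropped
      uint32_t vmap_table_offset_;      // kept: points at optimized stack maps
      QuickFrameInfoSketch frame_info_;
      uint32_t code_size_;
    };

    struct PreHeaderAfter {             // hypothetical post-cleanup layout
      uint32_t vmap_table_offset_;
      QuickFrameInfoSketch frame_info_;
      uint32_t code_size_;
    };

    static_assert(sizeof(PreHeaderBefore) - sizeof(PreHeaderAfter) == 8,
                  "dropping two 4-byte offsets saves 8 bytes per method");

At 8 bytes per compiled method, the quoted ~1.1 MiB boot.oat
reduction would correspond to on the order of 140,000 compiled
methods (1.1 MiB / 8 B ≈ 144k).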
diff --git a/runtime/stack.cc b/runtime/stack.cc
index c22eb92..56ef5aa 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -21,7 +21,6 @@
#include "base/hex_dump.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "entrypoints/runtime_asm_entrypoints.h"
-#include "gc_map.h"
#include "gc/space/image_space.h"
#include "gc/space/space-inl.h"
#include "jit/jit.h"
@@ -36,7 +35,6 @@
#include "thread.h"
#include "thread_list.h"
#include "verify_object-inl.h"
-#include "vmap_table.h"
namespace art {
@@ -215,33 +213,6 @@
return GetCurrentOatQuickMethodHeader()->NativeQuickPcOffset(cur_quick_frame_pc_);
}
-bool StackVisitor::IsReferenceVReg(ArtMethod* m, uint16_t vreg) {
- DCHECK_EQ(m, GetMethod());
- // Process register map (which native and runtime methods don't have)
- if (m->IsNative() || m->IsRuntimeMethod() || m->IsProxyMethod()) {
- return false;
- }
- const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
- if (method_header->IsOptimized()) {
- return true; // TODO: Implement.
- }
- const uint8_t* native_gc_map = method_header->GetNativeGcMap();
- CHECK(native_gc_map != nullptr) << PrettyMethod(m);
- const DexFile::CodeItem* code_item = m->GetCodeItem();
- // Can't be null or how would we compile its instructions?
- DCHECK(code_item != nullptr) << PrettyMethod(m);
- NativePcOffsetToReferenceMap map(native_gc_map);
- size_t num_regs = std::min(map.RegWidth() * 8, static_cast<size_t>(code_item->registers_size_));
- const uint8_t* reg_bitmap = nullptr;
- if (num_regs > 0) {
- uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc());
- reg_bitmap = map.FindBitMap(native_pc_offset);
- DCHECK(reg_bitmap != nullptr);
- }
- // Does this register hold a reference?
- return vreg < num_regs && TestBitmap(vreg, reg_bitmap);
-}
-
bool StackVisitor::GetVRegFromDebuggerShadowFrame(uint16_t vreg,
VRegKind kind,
uint32_t* val) const {
@@ -273,11 +244,8 @@
if (GetVRegFromDebuggerShadowFrame(vreg, kind, val)) {
return true;
}
- if (cur_oat_quick_method_header_->IsOptimized()) {
- return GetVRegFromOptimizedCode(m, vreg, kind, val);
- } else {
- return GetVRegFromQuickCode(m, vreg, kind, val);
- }
+ DCHECK(cur_oat_quick_method_header_->IsOptimized());
+ return GetVRegFromOptimizedCode(m, vreg, kind, val);
} else {
DCHECK(cur_shadow_frame_ != nullptr);
if (kind == kReferenceVReg) {
@@ -290,29 +258,6 @@
}
}
-bool StackVisitor::GetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
- uint32_t* val) const {
- DCHECK_EQ(m, GetMethod());
- const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
- QuickMethodFrameInfo frame_info = method_header->GetFrameInfo();
- const VmapTable vmap_table(method_header->GetVmapTable());
- uint32_t vmap_offset;
- // TODO: IsInContext stops before spotting floating point registers.
- if (vmap_table.IsInContext(vreg, kind, &vmap_offset)) {
- bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
- uint32_t spill_mask = is_float ? frame_info.FpSpillMask() : frame_info.CoreSpillMask();
- uint32_t reg = vmap_table.ComputeRegister(spill_mask, vmap_offset, kind);
- return GetRegisterIfAccessible(reg, kind, val);
- } else {
- const DexFile::CodeItem* code_item = m->GetCodeItem();
- DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be null or how would we compile
- // its instructions?
- *val = *GetVRegAddrFromQuickCode(cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
- frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), vreg);
- return true;
- }
-}
-
bool StackVisitor::GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
uint32_t* val) const {
DCHECK_EQ(m, GetMethod());
@@ -432,11 +377,8 @@
if (cur_quick_frame_ != nullptr) {
DCHECK(context_ != nullptr); // You can't reliably read registers without a context.
DCHECK(m == GetMethod());
- if (cur_oat_quick_method_header_->IsOptimized()) {
- return GetVRegPairFromOptimizedCode(m, vreg, kind_lo, kind_hi, val);
- } else {
- return GetVRegPairFromQuickCode(m, vreg, kind_lo, kind_hi, val);
- }
+ DCHECK(cur_oat_quick_method_header_->IsOptimized());
+ return GetVRegPairFromOptimizedCode(m, vreg, kind_lo, kind_hi, val);
} else {
DCHECK(cur_shadow_frame_ != nullptr);
*val = cur_shadow_frame_->GetVRegLong(vreg);
@@ -444,33 +386,6 @@
}
}
-bool StackVisitor::GetVRegPairFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
- VRegKind kind_hi, uint64_t* val) const {
- DCHECK_EQ(m, GetMethod());
- const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
- QuickMethodFrameInfo frame_info = method_header->GetFrameInfo();
- const VmapTable vmap_table(method_header->GetVmapTable());
- uint32_t vmap_offset_lo, vmap_offset_hi;
- // TODO: IsInContext stops before spotting floating point registers.
- if (vmap_table.IsInContext(vreg, kind_lo, &vmap_offset_lo) &&
- vmap_table.IsInContext(vreg + 1, kind_hi, &vmap_offset_hi)) {
- bool is_float = (kind_lo == kDoubleLoVReg);
- uint32_t spill_mask = is_float ? frame_info.FpSpillMask() : frame_info.CoreSpillMask();
- uint32_t reg_lo = vmap_table.ComputeRegister(spill_mask, vmap_offset_lo, kind_lo);
- uint32_t reg_hi = vmap_table.ComputeRegister(spill_mask, vmap_offset_hi, kind_hi);
- return GetRegisterPairIfAccessible(reg_lo, reg_hi, kind_lo, val);
- } else {
- const DexFile::CodeItem* code_item = m->GetCodeItem();
- DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be null or how would we compile
- // its instructions?
- uint32_t* addr = GetVRegAddrFromQuickCode(
- cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
- frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), vreg);
- *val = *reinterpret_cast<uint64_t*>(addr);
- return true;
- }
-}
-
bool StackVisitor::GetVRegPairFromOptimizedCode(ArtMethod* m, uint16_t vreg,
VRegKind kind_lo, VRegKind kind_hi,
uint64_t* val) const {