ARM64: Improve the code generated to spill/restore registers for slow paths.
Aligning the spill slots allows generating better code: stp/ldp of X
registers can only encode immediate offsets that are multiples of the
register size, so an aligned slot can be addressed directly off sp
instead of first materializing its address in a scratch register.
Before:
  add x16, sp, #0x44 (68)
  stp x0, x1, [x16, #-16]
After:
  stp x0, x1, [sp, #56]
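As a hedged illustration of the idea (the helper below is hypothetical,
not part of this change): each slow-path spill slot's sp-relative offset
is rounded up to the backend's preferred alignment so the pair store can
use a plain immediate offset.

  #include <cstdint>

  // Hypothetical helper (illustrative; not the actual ART code): round
  // a spill slot's sp-relative offset up to the backend's preferred
  // alignment. Assumes the alignment is a power of two, e.g. 8 bytes
  // for the 64-bit stp/ldp pairs shown above.
  static uint32_t AlignSlotOffset(uint32_t offset, uint32_t alignment) {
    return (offset + alignment - 1u) & ~(alignment - 1u);
  }

  // Example: the "Before" snippet stores the pair at sp + 52 (68 - 16);
  // AlignSlotOffset(52, 8) == 56, which matches the "After" snippet.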
Change-Id: I3e20ad3fa59d00aee4b4d14ea9d59c7cd546509e
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index b8540ba..47e6625 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -211,6 +211,8 @@
size_t maximum_number_of_live_fpu_registers,
size_t number_of_out_slots,
const ArenaVector<HBasicBlock*>& block_order);
+ // Backends can override this as necessary. For most, no special alignment is required.
+ virtual uint32_t GetPreferredSlotsAlignment() const { return 1; }
uint32_t GetFrameSize() const { return frame_size_; }
void SetFrameSize(uint32_t size) { frame_size_ = size; }
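For context, a minimal sketch of how a backend wanting aligned slots
might override the new hook (illustrative only; the actual ARM64
override is not part of the hunk above):

  // Hypothetical ARM64-style override: 8 bytes assumes the 64-bit X
  // registers used by the stp/ldp pairs in the example above.
  uint32_t GetPreferredSlotsAlignment() const override { return 8; }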