Diffstat (limited to 'compiler')
-rw-r--r--  compiler/dex/quick/arm/assemble_arm.cc    |  6
-rw-r--r--  compiler/dex/quick/arm/call_arm.cc        | 29
-rw-r--r--  compiler/dex/quick/codegen_util.cc        |  7
-rw-r--r--  compiler/dex/quick/mips/assemble_mips.cc  |  2
-rw-r--r--  compiler/dex/quick/x86/assemble_x86.cc    |  2
-rw-r--r--  compiler/utils/arena_allocator.h          |  3
-rw-r--r--  compiler/utils/scoped_arena_allocator.cc  |  2
-rw-r--r--  compiler/utils/scoped_arena_allocator.h   |  2
8 files changed, 30 insertions, 23 deletions
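
The recurring change below is mechanical: open-coded alignment arithmetic such as (offset + 3) & ~3 is replaced by the RoundUp() helper (declared in utils.h, which the arena_allocator.h hunk now includes). As a minimal sketch of why the two spellings agree for power-of-two alignments; the RoundUp() here is a local stand-in rather than ART's definition, and 16 merely stands in for kStackAlignment:

#include <cassert>
#include <cstdint>

// Stand-in for the helper: round x up to the next multiple of n, n a power of two.
constexpr uint32_t RoundUp(uint32_t x, uint32_t n) {
  return (x + n - 1) & ~(n - 1);
}

int main() {
  for (uint32_t x = 0; x < 256; ++x) {
    assert(RoundUp(x, 4) == ((x + 3) & ~3u));        // word alignment, as used for data_offset_
    uint32_t mask = 16 - 1;                          // the old kAlignMask trick
    assert(RoundUp(x, 16) == ((x + mask) & ~mask));  // as in ComputeFrameSize()
  }
  return 0;
}
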
diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc
index cac766d587..a895e6ec34 100644
--- a/compiler/dex/quick/arm/assemble_arm.cc
+++ b/compiler/dex/quick/arm/assemble_arm.cc
@@ -1213,7 +1213,7 @@ void ArmMir2Lir::AssembleLIR() {
   cu_->NewTimingSplit("Assemble");
   int assembler_retries = 0;
   CodeOffset starting_offset = LinkFixupInsns(first_lir_insn_, last_lir_insn_, 0);
-  data_offset_ = (starting_offset + 0x3) & ~0x3;
+  data_offset_ = RoundUp(starting_offset, 4);
   int32_t offset_adjustment;
   AssignDataOffsets();
 
@@ -1596,7 +1596,7 @@ void ArmMir2Lir::AssembleLIR() {
         LOG(FATAL) << "Assembler error - too many retries";
       }
       starting_offset += offset_adjustment;
-      data_offset_ = (starting_offset + 0x3) & ~0x3;
+      data_offset_ = RoundUp(starting_offset, 4);
       AssignDataOffsets();
     }
   }
@@ -1609,7 +1609,7 @@ void ArmMir2Lir::AssembleLIR() {
   write_pos = EncodeLIRs(write_pos, first_lir_insn_);
   DCHECK_EQ(static_cast<CodeOffset>(write_pos - &code_buffer_[0]), starting_offset);
 
-  DCHECK_EQ(data_offset_, (code_buffer_.size() + 0x3) & ~0x3);
+  DCHECK_EQ(data_offset_, RoundUp(code_buffer_.size(), 4));
 
   // Install literals
   InstallLiteralPools();
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 163c0fed4a..d3477c9af3 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -360,6 +360,22 @@ void ArmMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
     if (Runtime::Current()->ExplicitStackOverflowChecks()) {
       /* Load stack limit */
       Load32Disp(rs_rARM_SELF, Thread::StackEndOffset<4>().Int32Value(), rs_r12);
+    } else {
+      // Implicit stack overflow check.
+      // Generate a load from [sp, #-overflowsize]. If this is in the stack
+      // redzone we will get a segmentation fault.
+      //
+      // Caveat coder: if someone changes the kStackOverflowReservedBytes value
+      // we need to make sure that it's loadable in an immediate field of
+      // a sub instruction. Otherwise we will get a temp allocation and the
+      // code size will increase.
+      //
+      // This is done before the callee save instructions to avoid any possibility
+      // of these overflowing. This uses r12 and that's never saved in a callee
+      // save.
+      OpRegRegImm(kOpSub, rs_r12, rs_rARM_SP, Thread::kStackOverflowReservedBytes);
+      Load32Disp(rs_r12, 0, rs_r12);
+      MarkPossibleStackOverflowException();
     }
   }
   /* Spill core callee saves */
@@ -418,17 +434,8 @@ void ArmMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
         AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, false, frame_size_));
       }
     } else {
-      // Implicit stack overflow check.
-      // Generate a load from [sp, #-overflowsize]. If this is in the stack
-      // redzone we will get a segmentation fault.
-      //
-      // Caveat coder: if someone changes the kStackOverflowReservedBytes value
-      // we need to make sure that it's loadable in an immediate field of
-      // a sub instruction. Otherwise we will get a temp allocation and the
-      // code size will increase.
-      OpRegRegImm(kOpSub, rs_r12, rs_rARM_SP, Thread::kStackOverflowReservedBytes);
-      Load32Disp(rs_r12, 0, rs_r12);
-      MarkPossibleStackOverflowException();
+      // Implicit stack overflow check has already been done. Just make room on the
+      // stack for the frame now.
       OpRegImm(kOpSub, rs_rARM_SP, frame_size_without_spills);
     }
   } else {
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 9f84e098d9..de13a2ee69 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -558,7 +558,7 @@ static int AssignLiteralOffsetCommon(LIR* lir, CodeOffset offset) {
 static int AssignLiteralPointerOffsetCommon(LIR* lir, CodeOffset offset,
                                             unsigned int element_size) {
   // Align to natural pointer size.
-  offset = (offset + (element_size - 1)) & ~(element_size - 1);
+  offset = RoundUp(offset, element_size);
   for (; lir != NULL; lir = lir->next) {
     lir->offset = offset;
     offset += element_size;
@@ -758,7 +758,7 @@ int Mir2Lir::AssignFillArrayDataOffset(CodeOffset offset) {
     tab_rec->offset = offset;
     offset += tab_rec->size;
     // word align
-    offset = (offset + 3) & ~3;
+    offset = RoundUp(offset, 4);
   }
   return offset;
 }
@@ -1049,14 +1049,13 @@ size_t Mir2Lir::GetNumBytesForCompilerTempSpillRegion() {
 
 int Mir2Lir::ComputeFrameSize() {
   /* Figure out the frame size */
-  static const uint32_t kAlignMask = kStackAlignment - 1;
   uint32_t size = num_core_spills_ * GetBytesPerGprSpillLocation(cu_->instruction_set)
                   + num_fp_spills_ * GetBytesPerFprSpillLocation(cu_->instruction_set)
                   + sizeof(uint32_t)  // Filler.
                   + (cu_->num_regs + cu_->num_outs) * sizeof(uint32_t)
                   + GetNumBytesForCompilerTempSpillRegion();
   /* Align and set */
-  return (size + kAlignMask) & ~(kAlignMask);
+  return RoundUp(size, kStackAlignment);
 }
 
 /*
diff --git a/compiler/dex/quick/mips/assemble_mips.cc b/compiler/dex/quick/mips/assemble_mips.cc
index baae31915e..b26ab579c3 100644
--- a/compiler/dex/quick/mips/assemble_mips.cc
+++ b/compiler/dex/quick/mips/assemble_mips.cc
@@ -748,7 +748,7 @@ void MipsMir2Lir::AssignOffsets() {
   int offset = AssignInsnOffsets();
 
   /* Const values have to be word aligned */
-  offset = (offset + 3) & ~3;
+  offset = RoundUp(offset, 4);
 
   /* Set up offsets for literals */
   data_offset_ = offset;
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index 0fc5c6e943..7436e3960a 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -1483,7 +1483,7 @@ void X86Mir2Lir::AssignOffsets() {
   int offset = AssignInsnOffsets();
 
   /* Const values have to be word aligned */
-  offset = (offset + 3) & ~3;
+  offset = RoundUp(offset, 4);
 
   /* Set up offsets for literals */
   data_offset_ = offset;
diff --git a/compiler/utils/arena_allocator.h b/compiler/utils/arena_allocator.h
index 18a5bce77d..032eabc7df 100644
--- a/compiler/utils/arena_allocator.h
+++ b/compiler/utils/arena_allocator.h
@@ -23,6 +23,7 @@
 #include "base/macros.h"
 #include "base/mutex.h"
 #include "mem_map.h"
+#include "utils.h"
 
 namespace art {
 
@@ -155,7 +156,7 @@ class ArenaAllocator : private ArenaAllocatorStats {
     if (UNLIKELY(running_on_valgrind_)) {
      return AllocValgrind(bytes, kind);
     }
-    bytes = (bytes + 3) & ~3;
+    bytes = RoundUp(bytes, 4);
     if (UNLIKELY(ptr_ + bytes > end_)) {
       // Obtain a new block.
       ObtainNewArenaForAllocation(bytes);
diff --git a/compiler/utils/scoped_arena_allocator.cc b/compiler/utils/scoped_arena_allocator.cc
index bd78eaef0d..b8b0e6ef7d 100644
--- a/compiler/utils/scoped_arena_allocator.cc
+++ b/compiler/utils/scoped_arena_allocator.cc
@@ -92,7 +92,7 @@ void ArenaStack::UpdateBytesAllocated() {
 }
 
 void* ArenaStack::AllocValgrind(size_t bytes, ArenaAllocKind kind) {
-  size_t rounded_bytes = (bytes + kValgrindRedZoneBytes + 3) & ~3;
+  size_t rounded_bytes = RoundUp(bytes + kValgrindRedZoneBytes, 4);
   uint8_t* ptr = top_ptr_;
   if (UNLIKELY(static_cast<size_t>(top_end_ - ptr) < rounded_bytes)) {
     ptr = AllocateFromNextArena(rounded_bytes);
diff --git a/compiler/utils/scoped_arena_allocator.h b/compiler/utils/scoped_arena_allocator.h
index 28e86ec005..d5b003ca4d 100644
--- a/compiler/utils/scoped_arena_allocator.h
+++ b/compiler/utils/scoped_arena_allocator.h
@@ -67,7 +67,7 @@ class ArenaStack : private DebugStackRefCounter {
     if (UNLIKELY(running_on_valgrind_)) {
       return AllocValgrind(bytes, kind);
     }
-    size_t rounded_bytes = (bytes + 3) & ~3;
+    size_t rounded_bytes = RoundUp(bytes, 4);
     uint8_t* ptr = top_ptr_;
     if (UNLIKELY(static_cast<size_t>(top_end_ - ptr) < rounded_bytes)) {
       ptr = AllocateFromNextArena(rounded_bytes);
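
The call_arm.cc hunks also move the implicit stack-overflow probe ahead of the callee-save spills: the probe loads from sp minus Thread::kStackOverflowReservedBytes through r12, which is never callee-saved, so nothing has been pushed toward the guard region before the check runs. A simplified host-side model of what that probe tests; every name below is invented for illustration and none of it is ART code:

#include <cstddef>
#include <cstdint>

// The generated ARM sequence is roughly:
//   sub r12, sp, #kStackOverflowReservedBytes
//   ldr r12, [r12, #0]
// If the probed address has dropped into the protected region below the usable
// stack, the load faults and the runtime raises a StackOverflowError.
bool ProbeWouldFault(uintptr_t sp, uintptr_t stack_end, size_t reserved_bytes) {
  uintptr_t probe_address = sp - reserved_bytes;  // stack grows down
  return probe_address < stack_end;               // below stack_end => red zone
}

As the diff's own comment notes, this only stays cheap while kStackOverflowReservedBytes remains encodable in the immediate field of the sub instruction; otherwise a temp allocation creeps in and code size grows.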