author Andreas Gampe <agampe@google.com> 2014-05-05 20:47:19 -0700
committer Andreas Gampe <agampe@google.com> 2014-05-05 20:47:19 -0700
commit 660188264dee3c8f3510e2e24c11816c6b60f197 (patch)
tree cd18ee6c9328650110f06d14905468ea320342b4
parent 2a12ad460af139a03c3e9bf5fc7886a7521b333e (diff)
ART: Use utils.h::RoundUp instead of explicit bit-fiddling
Change-Id: I249a2cfeb044d3699d02e13d42b8e72518571640
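The change is mechanical: every open-coded (x + (n - 1)) & ~(n - 1) becomes RoundUp(x, n). For reference, a minimal sketch of the helper this patch relies on, assuming the alignment is a power of two; the real definitions live in the tree's utils.h and may differ in signature:

#include <cstdint>

// Assumed shape of the utils.h helpers; see utils.h in the tree for the
// real (possibly different) signatures.
template <typename T>
constexpr T RoundDown(T x, T n) {
  // Valid only when n is a power of two: x & -n clears the low log2(n) bits.
  return x & -n;
}

template <typename T>
constexpr T RoundUp(T x, T n) {
  return RoundDown(x + n - 1, n);
}

// For power-of-two n this is exactly the removed bit-fiddling:
// (x + 3) & ~3 == RoundUp(x, 4).
static_assert(RoundUp(uint32_t(5), uint32_t(4)) == 8, "5 rounds up to 8");
static_assert(RoundUp(uint32_t(8), uint32_t(4)) == 8, "multiples unchanged");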
-rw-r--r--	compiler/dex/quick/arm/assemble_arm.cc	6
-rw-r--r--	compiler/dex/quick/codegen_util.cc	7
-rw-r--r--	compiler/dex/quick/mips/assemble_mips.cc	2
-rw-r--r--	compiler/dex/quick/x86/assemble_x86.cc	2
-rw-r--r--	compiler/utils/arena_allocator.h	3
-rw-r--r--	compiler/utils/scoped_arena_allocator.cc	2
-rw-r--r--	compiler/utils/scoped_arena_allocator.h	2
7 files changed, 12 insertions, 12 deletions
diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc
index cac766d587..a895e6ec34 100644
--- a/compiler/dex/quick/arm/assemble_arm.cc
+++ b/compiler/dex/quick/arm/assemble_arm.cc
@@ -1213,7 +1213,7 @@ void ArmMir2Lir::AssembleLIR() {
cu_->NewTimingSplit("Assemble");
int assembler_retries = 0;
CodeOffset starting_offset = LinkFixupInsns(first_lir_insn_, last_lir_insn_, 0);
- data_offset_ = (starting_offset + 0x3) & ~0x3;
+ data_offset_ = RoundUp(starting_offset, 4);
int32_t offset_adjustment;
AssignDataOffsets();
@@ -1596,7 +1596,7 @@ void ArmMir2Lir::AssembleLIR() {
LOG(FATAL) << "Assembler error - too many retries";
}
starting_offset += offset_adjustment;
- data_offset_ = (starting_offset + 0x3) & ~0x3;
+ data_offset_ = RoundUp(starting_offset, 4);
AssignDataOffsets();
}
}
@@ -1609,7 +1609,7 @@ void ArmMir2Lir::AssembleLIR() {
write_pos = EncodeLIRs(write_pos, first_lir_insn_);
DCHECK_EQ(static_cast<CodeOffset>(write_pos - &code_buffer_[0]), starting_offset);
- DCHECK_EQ(data_offset_, (code_buffer_.size() + 0x3) & ~0x3);
+ DCHECK_EQ(data_offset_, RoundUp(code_buffer_.size(), 4));
// Install literals
InstallLiteralPools();
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 9f84e098d9..de13a2ee69 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -558,7 +558,7 @@ static int AssignLiteralOffsetCommon(LIR* lir, CodeOffset offset) {
static int AssignLiteralPointerOffsetCommon(LIR* lir, CodeOffset offset,
unsigned int element_size) {
// Align to natural pointer size.
- offset = (offset + (element_size - 1)) & ~(element_size - 1);
+ offset = RoundUp(offset, element_size);
for (; lir != NULL; lir = lir->next) {
lir->offset = offset;
offset += element_size;
@@ -758,7 +758,7 @@ int Mir2Lir::AssignFillArrayDataOffset(CodeOffset offset) {
tab_rec->offset = offset;
offset += tab_rec->size;
// word align
- offset = (offset + 3) & ~3;
+ offset = RoundUp(offset, 4);
}
return offset;
}
@@ -1049,14 +1049,13 @@ size_t Mir2Lir::GetNumBytesForCompilerTempSpillRegion() {
int Mir2Lir::ComputeFrameSize() {
/* Figure out the frame size */
- static const uint32_t kAlignMask = kStackAlignment - 1;
uint32_t size = num_core_spills_ * GetBytesPerGprSpillLocation(cu_->instruction_set)
+ num_fp_spills_ * GetBytesPerFprSpillLocation(cu_->instruction_set)
+ sizeof(uint32_t) // Filler.
+ (cu_->num_regs + cu_->num_outs) * sizeof(uint32_t)
+ GetNumBytesForCompilerTempSpillRegion();
/* Align and set */
- return (size + kAlignMask) & ~(kAlignMask);
+ return RoundUp(size, kStackAlignment);
}
/*
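The ComputeFrameSize() hunk above also drops the local kAlignMask constant, passing kStackAlignment straight to RoundUp. A worked example, assuming kStackAlignment is 16 (it is target-dependent; 16 here is illustrative only):

// Suppose spills + filler + regs/outs + compiler temps total 52 bytes.
// Old: (52 + kAlignMask) & ~kAlignMask, with kAlignMask = 16 - 1 = 15:
//      (52 + 15) & ~15 = 67 & ~15 = 64
// New: RoundUp(52, 16) = (52 + 15) & ~15 = 64  -- same value, clearer intent.
static_assert(((52u + 15u) & ~15u) == 64u, "old and new forms agree");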
diff --git a/compiler/dex/quick/mips/assemble_mips.cc b/compiler/dex/quick/mips/assemble_mips.cc
index baae31915e..b26ab579c3 100644
--- a/compiler/dex/quick/mips/assemble_mips.cc
+++ b/compiler/dex/quick/mips/assemble_mips.cc
@@ -748,7 +748,7 @@ void MipsMir2Lir::AssignOffsets() {
int offset = AssignInsnOffsets();
/* Const values have to be word aligned */
- offset = (offset + 3) & ~3;
+ offset = RoundUp(offset, 4);
/* Set up offsets for literals */
data_offset_ = offset;
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index 58e2f425ed..0ce081b975 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -1388,7 +1388,7 @@ void X86Mir2Lir::AssignOffsets() {
int offset = AssignInsnOffsets();
/* Const values have to be word aligned */
- offset = (offset + 3) & ~3;
+ offset = RoundUp(offset, 4);
/* Set up offsets for literals */
data_offset_ = offset;
diff --git a/compiler/utils/arena_allocator.h b/compiler/utils/arena_allocator.h
index 18a5bce77d..032eabc7df 100644
--- a/compiler/utils/arena_allocator.h
+++ b/compiler/utils/arena_allocator.h
@@ -23,6 +23,7 @@
#include "base/macros.h"
#include "base/mutex.h"
#include "mem_map.h"
+#include "utils.h"
namespace art {
@@ -155,7 +156,7 @@ class ArenaAllocator : private ArenaAllocatorStats {
if (UNLIKELY(running_on_valgrind_)) {
return AllocValgrind(bytes, kind);
}
- bytes = (bytes + 3) & ~3;
+ bytes = RoundUp(bytes, 4);
if (UNLIKELY(ptr_ + bytes > end_)) {
// Obtain a new block.
ObtainNewArenaForAllocation(bytes);
diff --git a/compiler/utils/scoped_arena_allocator.cc b/compiler/utils/scoped_arena_allocator.cc
index bd78eaef0d..b8b0e6ef7d 100644
--- a/compiler/utils/scoped_arena_allocator.cc
+++ b/compiler/utils/scoped_arena_allocator.cc
@@ -92,7 +92,7 @@ void ArenaStack::UpdateBytesAllocated() {
}
void* ArenaStack::AllocValgrind(size_t bytes, ArenaAllocKind kind) {
- size_t rounded_bytes = (bytes + kValgrindRedZoneBytes + 3) & ~3;
+ size_t rounded_bytes = RoundUp(bytes + kValgrindRedZoneBytes, 4);
uint8_t* ptr = top_ptr_;
if (UNLIKELY(static_cast<size_t>(top_end_ - ptr) < rounded_bytes)) {
ptr = AllocateFromNextArena(rounded_bytes);
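One detail worth noting in the AllocValgrind() hunk above: the red zone is added before rounding, and the RoundUp form preserves that order. With bytes = 10 and kValgrindRedZoneBytes assumed to be 8 (an illustrative value only):

// Old: (10 + 8 + 3) & ~3 = 21 & ~3 = 20
// New: RoundUp(10 + 8, 4) = RoundUp(18, 4) = 20  -- identical result.
static_assert(((10u + 8u + 3u) & ~3u) == 20u, "red zone added before rounding");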
diff --git a/compiler/utils/scoped_arena_allocator.h b/compiler/utils/scoped_arena_allocator.h
index 28e86ec005..d5b003ca4d 100644
--- a/compiler/utils/scoped_arena_allocator.h
+++ b/compiler/utils/scoped_arena_allocator.h
@@ -67,7 +67,7 @@ class ArenaStack : private DebugStackRefCounter {
if (UNLIKELY(running_on_valgrind_)) {
return AllocValgrind(bytes, kind);
}
- size_t rounded_bytes = (bytes + 3) & ~3;
+ size_t rounded_bytes = RoundUp(bytes, 4);
uint8_t* ptr = top_ptr_;
if (UNLIKELY(static_cast<size_t>(top_end_ - ptr) < rounded_bytes)) {
ptr = AllocateFromNextArena(rounded_bytes);
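Finally, a standalone sanity check (not part of the patch; the helper names here are ours) that the removed expression and the RoundUp form agree for 4-byte alignment across a range of sizes:

#include <cassert>
#include <cstddef>

// Not part of the patch: exhaustive check over a small range that the old
// bit-fiddling and RoundUp(x, 4) compute the same value.
static size_t OldRound4(size_t x) { return (x + 3) & ~static_cast<size_t>(3); }
static size_t NewRound4(size_t x) { return (x + 4 - 1) & ~(static_cast<size_t>(4) - 1); }

int main() {
  for (size_t x = 0; x <= 4096; ++x) {
    assert(OldRound4(x) == NewRound4(x));
  }
  return 0;
}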