Centralize instruction-set pointer-size, alignment, and 64-bit-width code
in instruction_set.{h,cc}
This allows us to clean up several places that currently make explicit
per-ISA comparisons.
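
As a sketch, call sites can now use the new helpers instead of switching
over the ISA:

  size_t pointer_size = GetInstructionSetPointerSize(isa);
  size_t alignment = GetInstructionSetAlignment(isa);
  bool is_64_bit = Is64BitInstructionSet(isa);
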
Change-Id: I0dcc924c52fa53306f706aceea93a2d4a655c5df
diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc
index 8e013c1..59ed827 100644
--- a/compiler/compiled_method.cc
+++ b/compiler/compiled_method.cc
@@ -82,21 +82,7 @@
}
uint32_t CompiledCode::AlignCode(uint32_t offset, InstructionSet instruction_set) {
- switch (instruction_set) {
- case kArm:
- case kThumb2:
- return RoundUp(offset, kArmAlignment);
- case kArm64:
- return RoundUp(offset, kArm64Alignment);
- case kMips:
- return RoundUp(offset, kMipsAlignment);
- case kX86: // Fall-through.
- case kX86_64:
- return RoundUp(offset, kX86Alignment);
- default:
- LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
- return 0;
- }
+ return RoundUp(offset, GetInstructionSetAlignment(instruction_set));
}
size_t CompiledCode::CodeDelta() const {
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index 201dc47..1bf5fce 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -158,7 +158,7 @@
if (cu.instruction_set == kArm) {
cu.instruction_set = kThumb2;
}
- cu.target64 = (cu.instruction_set == kX86_64) || (cu.instruction_set == kArm64);
+ cu.target64 = Is64BitInstructionSet(cu.instruction_set);
cu.compiler = compiler;
// TODO: x86_64 & arm64 are not yet implemented.
CHECK((cu.instruction_set == kThumb2) ||
@@ -166,7 +166,6 @@
(cu.instruction_set == kX86_64) ||
(cu.instruction_set == kMips));
-
/* Adjust this value accordingly once inlining is performed */
cu.num_dalvik_registers = code_item->registers_size_;
// TODO: set this from command line
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 2b20c6f..0ad30be 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -339,7 +339,6 @@
compiler_(Compiler::Create(compiler_kind)),
instruction_set_(instruction_set),
instruction_set_features_(instruction_set_features),
- instruction_set_is_64_bit_(instruction_set == kX86_64 || instruction_set == kArm64),
freezing_constructor_lock_("freezing constructor lock"),
compiled_classes_lock_("compiled classes lock"),
compiled_methods_lock_("compiled method lock"),
@@ -448,7 +447,7 @@
}
#define CREATE_TRAMPOLINE(type, abi, offset) \
- if (instruction_set_is_64_bit_) { \
+ if (Is64BitInstructionSet(instruction_set_)) { \
return CreateTrampoline64(instruction_set_, abi, \
type ## _ENTRYPOINT_OFFSET(8, offset)); \
} else { \
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index d49523a..d7d40d5 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -692,7 +692,6 @@
const InstructionSet instruction_set_;
const InstructionSetFeatures instruction_set_features_;
- const bool instruction_set_is_64_bit_;
// All class references that require
mutable ReaderWriterMutex freezing_constructor_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 64508d1..e03aefb 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -64,6 +64,7 @@
if (instruction_set == kThumb2) {
instruction_set = kArm;
}
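+ // Compute the target's 64-bitness once; it selects between the
+ // ThreadOffset<8>/ThreadOffset<4> code paths below.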
+ const bool is_64_bit_target = Is64BitInstructionSet(instruction_set);
// Calling conventions used to iterate over parameters to method
UniquePtr<JniCallingConvention> main_jni_conv(
JniCallingConvention::Create(is_static, is_synchronized, shorty, instruction_set));
@@ -109,7 +110,7 @@
main_jni_conv->ReferenceCount(),
mr_conv->InterproceduralScratchRegister());
- if (instruction_set == kArm64 || instruction_set == kX86_64) {
+ if (is_64_bit_target) {
__ CopyRawPtrFromThread64(main_jni_conv->SirtLinkOffset(),
Thread::TopSirtOffset<8>(),
mr_conv->InterproceduralScratchRegister());
@@ -171,7 +172,7 @@
}
// 4. Write out the end of the quick frames.
- if (instruction_set == kArm64 || instruction_set == kX86_64) {
+ if (is_64_bit_target) {
__ StoreStackPointerToThread64(Thread::TopOfManagedStackOffset<8>());
__ StoreImmediateToThread64(Thread::TopOfManagedStackPcOffset<8>(), 0,
mr_conv->InterproceduralScratchRegister());
@@ -216,7 +217,7 @@
}
if (main_jni_conv->IsCurrentParamInRegister()) {
__ GetCurrentThread(main_jni_conv->CurrentParamRegister());
- if (instruction_set == kArm64 || instruction_set == kX86_64) {
+ if (is_64_bit_target) {
__ Call(main_jni_conv->CurrentParamRegister(), Offset(jni_start64),
main_jni_conv->InterproceduralScratchRegister());
} else {
@@ -226,7 +227,7 @@
} else {
__ GetCurrentThread(main_jni_conv->CurrentParamStackOffset(),
main_jni_conv->InterproceduralScratchRegister());
- if (instruction_set == kArm64 || instruction_set == kX86_64) {
+ if (is_64_bit_target) {
__ CallFromThread64(jni_start64, main_jni_conv->InterproceduralScratchRegister());
} else {
__ CallFromThread32(jni_start32, main_jni_conv->InterproceduralScratchRegister());
@@ -292,14 +293,14 @@
if (main_jni_conv->IsCurrentParamInRegister()) {
ManagedRegister jni_env = main_jni_conv->CurrentParamRegister();
DCHECK(!jni_env.Equals(main_jni_conv->InterproceduralScratchRegister()));
- if (instruction_set == kArm64 || instruction_set == kX86_64) {
+ if (is_64_bit_target) {
__ LoadRawPtrFromThread64(jni_env, Thread::JniEnvOffset<8>());
} else {
__ LoadRawPtrFromThread32(jni_env, Thread::JniEnvOffset<4>());
}
} else {
FrameOffset jni_env = main_jni_conv->CurrentParamStackOffset();
- if (instruction_set == kArm64 || instruction_set == kX86_64) {
+ if (is_64_bit_target) {
__ CopyRawPtrFromThread64(jni_env, Thread::JniEnvOffset<8>(),
main_jni_conv->InterproceduralScratchRegister());
} else {
@@ -313,7 +314,7 @@
mr_conv->InterproceduralScratchRegister());
// 10. Fix differences in result widths.
- if (instruction_set == kX86 || instruction_set == kX86_64) {
+ if (instruction_set == kX86 || is_64_bit_target) {
if (main_jni_conv->GetReturnType() == Primitive::kPrimByte ||
main_jni_conv->GetReturnType() == Primitive::kPrimShort) {
__ SignExtend(main_jni_conv->ReturnRegister(),
@@ -331,7 +332,7 @@
if (instruction_set == kMips && main_jni_conv->GetReturnType() == Primitive::kPrimDouble &&
return_save_location.Uint32Value() % 8 != 0) {
// Ensure doubles are 8-byte aligned for MIPS
- return_save_location = FrameOffset(return_save_location.Uint32Value() + kPointerSize);
+ return_save_location = FrameOffset(return_save_location.Uint32Value() + kMipsPointerSize);
}
CHECK_LT(return_save_location.Uint32Value(), frame_size+main_out_arg_size);
__ Store(return_save_location, main_jni_conv->ReturnRegister(), main_jni_conv->SizeOfReturnValue());
@@ -380,7 +381,7 @@
}
if (end_jni_conv->IsCurrentParamInRegister()) {
__ GetCurrentThread(end_jni_conv->CurrentParamRegister());
- if (instruction_set == kArm64 || instruction_set == kX86_64) {
+ if (is_64_bit_target) {
__ Call(end_jni_conv->CurrentParamRegister(), Offset(jni_end64),
end_jni_conv->InterproceduralScratchRegister());
} else {
@@ -390,7 +391,7 @@
} else {
__ GetCurrentThread(end_jni_conv->CurrentParamStackOffset(),
end_jni_conv->InterproceduralScratchRegister());
- if (instruction_set == kArm64 || instruction_set == kX86_64) {
+ if (is_64_bit_target) {
__ CallFromThread64(ThreadOffset<8>(jni_end64), end_jni_conv->InterproceduralScratchRegister());
} else {
__ CallFromThread32(ThreadOffset<4>(jni_end32), end_jni_conv->InterproceduralScratchRegister());
diff --git a/compiler/llvm/llvm_compilation_unit.cc b/compiler/llvm/llvm_compilation_unit.cc
index 1d027f9..fe60959 100644
--- a/compiler/llvm/llvm_compilation_unit.cc
+++ b/compiler/llvm/llvm_compilation_unit.cc
@@ -314,23 +314,8 @@
// section if the section alignment is greater than kArchAlignment.
void LlvmCompilationUnit::CheckCodeAlign(uint32_t align) const {
InstructionSet insn_set = GetInstructionSet();
- switch (insn_set) {
- case kThumb2:
- case kArm:
- CHECK_LE(align, static_cast<uint32_t>(kArmAlignment));
- break;
-
- case kX86:
- CHECK_LE(align, static_cast<uint32_t>(kX86Alignment));
- break;
-
- case kMips:
- CHECK_LE(align, static_cast<uint32_t>(kMipsAlignment));
- break;
-
- default:
- LOG(FATAL) << "Unknown instruction set: " << insn_set;
- }
+ size_t insn_set_align = GetInstructionSetAlignment(insn_set);
+ CHECK_LE(align, static_cast<uint32_t>(insn_set_align));
}
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index eff2425..dc66e9c 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -345,36 +345,6 @@
return offset;
}
-static void DCheckCodeAlignment(size_t offset, InstructionSet isa) {
- switch (isa) {
- case kArm:
- // Fall-through.
- case kThumb2:
- DCHECK_ALIGNED(offset, kArmAlignment);
- break;
-
- case kArm64:
- DCHECK_ALIGNED(offset, kArm64Alignment);
- break;
-
- case kMips:
- DCHECK_ALIGNED(offset, kMipsAlignment);
- break;
-
- case kX86_64:
- // Fall-through.
- case kX86:
- DCHECK_ALIGNED(offset, kX86Alignment);
- break;
-
- case kNone:
- // Use a DCHECK instead of FATAL so that in the non-debug case the whole switch can
- // be optimized away.
- DCHECK(false);
- break;
- }
-}
-
size_t OatWriter::InitOatCodeMethod(size_t offset, size_t oat_class_index,
size_t __attribute__((unused)) class_def_index,
size_t class_def_method_index,
@@ -406,7 +376,8 @@
} else {
CHECK(quick_code != nullptr);
offset = compiled_method->AlignCode(offset);
- DCheckCodeAlignment(offset, compiled_method->GetInstructionSet());
+ DCHECK_ALIGNED_PARAM(offset,
+ GetInstructionSetAlignment(compiled_method->GetInstructionSet()));
uint32_t code_size = quick_code->size() * sizeof(uint8_t);
CHECK_NE(code_size, 0U);
@@ -539,11 +510,7 @@
refs++;
}
}
- InstructionSet trg_isa = compiler_driver_->GetInstructionSet();
- size_t pointer_size = 4;
- if (trg_isa == kArm64 || trg_isa == kX86_64) {
- pointer_size = 8;
- }
+ size_t pointer_size = GetInstructionSetPointerSize(compiler_driver_->GetInstructionSet());
size_t sirt_size = StackIndirectReferenceTable::GetAlignedSirtSizeTarget(pointer_size, refs);
// Get the generic spill masks and base frame size.
@@ -857,7 +824,8 @@
relative_offset += aligned_code_delta;
DCHECK_OFFSET();
}
- DCheckCodeAlignment(relative_offset, compiled_method->GetInstructionSet());
+ DCHECK_ALIGNED_PARAM(relative_offset,
+ GetInstructionSetAlignment(compiled_method->GetInstructionSet()));
uint32_t code_size = quick_code->size() * sizeof(uint8_t);
CHECK_NE(code_size, 0U);
diff --git a/runtime/instruction_set.cc b/runtime/instruction_set.cc
index c964629..73d4279 100644
--- a/runtime/instruction_set.cc
+++ b/runtime/instruction_set.cc
@@ -16,8 +16,78 @@
#include "instruction_set.h"
+#include "globals.h"
+#include "base/logging.h" // Logging is required for FATAL in the helper functions.
+
namespace art {
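+// Pointer size (in bytes) of the given target ISA; this is the target's
+// pointer width, not the compiling host's sizeof(void*).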
+size_t GetInstructionSetPointerSize(InstructionSet isa) {
+ switch (isa) {
+ case kArm:
+ // Fall-through.
+ case kThumb2:
+ return kArmPointerSize;
+ case kArm64:
+ return kArm64PointerSize;
+ case kX86:
+ return kX86PointerSize;
+ case kX86_64:
+ return kX86_64PointerSize;
+ case kMips:
+ return kMipsPointerSize;
+ case kNone:
+ LOG(FATAL) << "ISA kNone does not have pointer size.";
+ return 0;
+ default:
+ LOG(FATAL) << "Unknown ISA " << isa;
+ return 0;
+ }
+}
+
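+// Required code alignment (in bytes) for the given target ISA.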
+size_t GetInstructionSetAlignment(InstructionSet isa) {
+ switch (isa) {
+ case kArm:
+ // Fall-through.
+ case kThumb2:
+ return kArmAlignment;
+ case kArm64:
+ return kArm64Alignment;
+ case kX86:
+ // Fall-through.
+ case kX86_64:
+ return kX86Alignment;
+ case kMips:
+ return kMipsAlignment;
+ case kNone:
+ LOG(FATAL) << "ISA kNone does not have alignment.";
+ return 0;
+ default:
+ LOG(FATAL) << "Unknown ISA " << isa;
+ return 0;
+ }
+}
+
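+// Whether the given ISA is a 64-bit target (currently arm64 and x86-64).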
+bool Is64BitInstructionSet(InstructionSet isa) {
+ switch (isa) {
+ case kArm:
+ case kThumb2:
+ case kX86:
+ case kMips:
+ return false;
+
+ case kArm64:
+ case kX86_64:
+ return true;
+
+ case kNone:
+ LOG(FATAL) << "ISA kNone does not have bit width.";
+ return false;
+ default:
+ LOG(FATAL) << "Unknown ISA " << isa;
+ return false;
+ }
+}
+
std::string InstructionSetFeatures::GetFeatureString() const {
std::string result;
if ((mask_ & kHwDiv) != 0) {
diff --git a/runtime/instruction_set.h b/runtime/instruction_set.h
index a08becf..c746e06 100644
--- a/runtime/instruction_set.h
+++ b/runtime/instruction_set.h
@@ -35,6 +35,10 @@
};
std::ostream& operator<<(std::ostream& os, const InstructionSet& rhs);
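+// Helpers mapping an InstructionSet to its target pointer size, required code
+// alignment, and bit width; all of them LOG(FATAL) on kNone or unknown ISAs.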
+size_t GetInstructionSetPointerSize(InstructionSet isa);
+size_t GetInstructionSetAlignment(InstructionSet isa);
+bool Is64BitInstructionSet(InstructionSet isa);
+
#if defined(__arm__)
static constexpr InstructionSet kRuntimeISA = kArm;
#elif defined(__aarch64__)
diff --git a/runtime/utils.h b/runtime/utils.h
index dbc3ab7..5def66b 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -62,12 +62,20 @@
return IsAligned<n>(reinterpret_cast<const uintptr_t>(x));
}
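+// Run-time-alignment variant of IsAligned; n must be a power of two.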
+template<typename T>
+static inline bool IsAlignedParam(T x, int n) {
+ return (x & (n - 1)) == 0;
+}
+
#define CHECK_ALIGNED(value, alignment) \
CHECK(::art::IsAligned<alignment>(value)) << reinterpret_cast<const void*>(value)
#define DCHECK_ALIGNED(value, alignment) \
DCHECK(::art::IsAligned<alignment>(value)) << reinterpret_cast<const void*>(value)
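+// Like DCHECK_ALIGNED, but for an alignment that is not a compile-time
+// constant (e.g. one returned by GetInstructionSetAlignment()).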
+#define DCHECK_ALIGNED_PARAM(value, alignment) \
+ DCHECK(::art::IsAlignedParam(value, alignment)) << reinterpret_cast<const void*>(value)
+
// Check whether an N-bit two's-complement representation can hold value.
static inline bool IsInt(int N, word value) {
CHECK_LT(0, N);