ART: Convert pointer size to enum

Move away from size_t to a dedicated enum class (PointerSize).
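
For reference, a minimal sketch of what the new base/enums.h header
introduces, inferred from how the diff below uses it (PointerSize::k32
and PointerSize::k64, kRuntimePointerSize, and static_cast<size_t>
conversions back to integral widths). The exact definition here is an
assumption, not the verbatim header:

    // base/enums.h (sketch; actual contents may differ)
    #include <cstddef>

    namespace art {

    // Strongly typed pointer width, replacing raw size_t values 4 and 8.
    // Backed by size_t so static_cast<size_t>(...) recovers the byte count.
    enum class PointerSize : size_t {
      k32 = 4,
      k64 = 8,
    };

    // Pointer size of the runtime doing the compiling (assumed helper,
    // used by e.g. exception_test.cc below).
    static constexpr PointerSize kRuntimePointerSize =
        sizeof(void*) == 8U ? PointerSize::k64 : PointerSize::k32;

    }  // namespace art
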
Bug: 30373134
Bug: 30419309
Test: m test-art-host
Change-Id: Id453c330f1065012e7d4f9fc24ac477cc9bb9269
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index bf29e1c..06a39b2 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -19,6 +19,7 @@
#include "arch/instruction_set_features.h"
#include "art_field-inl.h"
#include "art_method.h"
+#include "base/enums.h"
#include "class_linker.h"
#include "compiled_method.h"
#include "dex/quick_compiler_callbacks.h"
@@ -115,7 +116,7 @@
Handle<mirror::ClassLoader> loader(hs.NewHandle(class_loader));
mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), loader);
CHECK(klass != nullptr) << "Class not found " << class_name;
- size_t pointer_size = class_linker_->GetImagePointerSize();
+ PointerSize pointer_size = class_linker_->GetImagePointerSize();
for (auto& m : klass->GetMethods(pointer_size)) {
MakeExecutable(&m);
}
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 951b075..8d53dbf 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -843,13 +843,14 @@
}
}
-uint32_t DexFileMethodInliner::GetOffsetForStringInit(uint32_t method_index, size_t pointer_size) {
+uint32_t DexFileMethodInliner::GetOffsetForStringInit(uint32_t method_index,
+ PointerSize pointer_size) {
ReaderMutexLock mu(Thread::Current(), lock_);
auto it = inline_methods_.find(method_index);
if (it != inline_methods_.end() && (it->second.opcode == kInlineStringInit)) {
uint32_t string_init_base_offset = Thread::QuickEntryPointOffsetWithSize(
OFFSETOF_MEMBER(QuickEntryPoints, pNewEmptyString), pointer_size);
- return string_init_base_offset + it->second.d.data * pointer_size;
+ return string_init_base_offset + it->second.d.data * static_cast<size_t>(pointer_size);
}
return 0;
}
diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h
index 50dc032..dbdfa24 100644
--- a/compiler/dex/quick/dex_file_method_inliner.h
+++ b/compiler/dex/quick/dex_file_method_inliner.h
@@ -18,6 +18,8 @@
#define ART_COMPILER_DEX_QUICK_DEX_FILE_METHOD_INLINER_H_
#include <stdint.h>
+
+#include "base/enums.h"
#include "base/mutex.h"
#include "base/macros.h"
#include "safe_map.h"
@@ -82,7 +84,7 @@
/**
* Gets the thread pointer entrypoint offset for a string init method index and pointer size.
*/
- uint32_t GetOffsetForStringInit(uint32_t method_index, size_t pointer_size)
+ uint32_t GetOffsetForStringInit(uint32_t method_index, PointerSize pointer_size)
REQUIRES(!lock_);
/**
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index bace014..4bcd59a 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -21,6 +21,7 @@
#include <vector>
#include "art_method-inl.h"
+#include "base/enums.h"
#include "base/logging.h"
#include "base/stl_util.h"
#include "dex_file.h"
@@ -169,7 +170,7 @@
continue;
}
auto* cl = Runtime::Current()->GetClassLinker();
- size_t pointer_size = cl->GetImagePointerSize();
+ PointerSize pointer_size = cl->GetImagePointerSize();
ArtMethod* abstract_method = method_verifier->GetDexCache()->GetResolvedMethod(
is_range ? inst->VRegB_3rc() : inst->VRegB_35c(), pointer_size);
if (abstract_method == nullptr) {
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index 94f5acc..3a260f5 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -21,6 +21,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "class_linker-inl.h"
#include "dex_compilation_unit.h"
#include "mirror/class_loader.h"
@@ -336,7 +337,7 @@
methods_declaring_class->IsFinal());
// For invoke-super, ensure the vtable index will be correct to dispatch in the vtable of
// the super class.
- const size_t pointer_size = InstructionSetPointerSize(GetInstructionSet());
+ const PointerSize pointer_size = InstructionSetPointerSize(GetInstructionSet());
// TODO We should be able to sharpen if we are going into the boot image as well.
bool can_sharpen_super_based_on_type = same_dex_file &&
(*invoke_type == kSuper) &&
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 4c0095d..8286033 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -27,6 +27,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/bit_vector.h"
+#include "base/enums.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/time_utils.h"
@@ -434,10 +435,10 @@
#define CREATE_TRAMPOLINE(type, abi, offset) \
if (Is64BitInstructionSet(instruction_set_)) { \
return CreateTrampoline64(instruction_set_, abi, \
- type ## _ENTRYPOINT_OFFSET(8, offset)); \
+ type ## _ENTRYPOINT_OFFSET(PointerSize::k64, offset)); \
} else { \
return CreateTrampoline32(instruction_set_, abi, \
- type ## _ENTRYPOINT_OFFSET(4, offset)); \
+ type ## _ENTRYPOINT_OFFSET(PointerSize::k32, offset)); \
}
std::unique_ptr<const std::vector<uint8_t>> CompilerDriver::CreateJniDlsymLookup() const {
@@ -1015,7 +1016,7 @@
}
private:
- void ResolveExceptionsForMethod(ArtMethod* method_handle, size_t pointer_size)
+ void ResolveExceptionsForMethod(ArtMethod* method_handle, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_) {
const DexFile::CodeItem* code_item = method_handle->GetCodeItem();
if (code_item == nullptr) {
@@ -1147,7 +1148,7 @@
// Make a copy of the handle so that we don't clobber it doing Assign.
MutableHandle<mirror::Class> klass(hs.NewHandle(c.Get()));
std::string temp;
- const size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ const PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
while (!klass->IsObjectClass()) {
const char* descriptor = klass->GetDescriptor(&temp);
std::pair<std::unordered_set<std::string>::iterator, bool> result =
@@ -2885,7 +2886,7 @@
bool CompilerDriver::IsStringInit(uint32_t method_index, const DexFile* dex_file, int32_t* offset) {
DexFileMethodInliner* inliner = GetMethodInlinerMap()->GetMethodInliner(dex_file);
- size_t pointer_size = InstructionSetPointerSize(GetInstructionSet());
+ PointerSize pointer_size = InstructionSetPointerSize(GetInstructionSet());
*offset = inliner->GetOffsetForStringInit(method_index, pointer_size);
return inliner->IsStringInitMethodIndex(method_index);
}
diff --git a/compiler/exception_test.cc b/compiler/exception_test.cc
index 38ac052..e223534 100644
--- a/compiler/exception_test.cc
+++ b/compiler/exception_test.cc
@@ -17,6 +17,7 @@
#include <memory>
#include "base/arena_allocator.h"
+#include "base/enums.h"
#include "class_linker.h"
#include "common_runtime_test.h"
#include "dex_file.h"
@@ -100,11 +101,11 @@
CHECK_ALIGNED(stack_maps_offset, 2);
}
- method_f_ = my_klass_->FindVirtualMethod("f", "()I", sizeof(void*));
+ method_f_ = my_klass_->FindVirtualMethod("f", "()I", kRuntimePointerSize);
ASSERT_TRUE(method_f_ != nullptr);
method_f_->SetEntryPointFromQuickCompiledCode(code_ptr);
- method_g_ = my_klass_->FindVirtualMethod("g", "(I)V", sizeof(void*));
+ method_g_ = my_klass_->FindVirtualMethod("g", "(I)V", kRuntimePointerSize);
ASSERT_TRUE(method_g_ != nullptr);
method_g_->SetEntryPointFromQuickCompiledCode(code_ptr);
}
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 063eb11..7a34683 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -1416,7 +1416,7 @@
}
case kBinImTable:
case kBinIMTConflictTable: {
- bin_offset = RoundUp(bin_offset, target_ptr_size_);
+ bin_offset = RoundUp(bin_offset, static_cast<size_t>(target_ptr_size_));
break;
}
default: {
@@ -1573,7 +1573,7 @@
boot_image_end - boot_image_begin,
boot_oat_begin,
boot_oat_end - boot_oat_begin,
- target_ptr_size_,
+ static_cast<uint32_t>(target_ptr_size_),
compile_pic_,
/*is_pic*/compile_app_image_,
image_storage_mode_,
@@ -2029,7 +2029,7 @@
if (orig_strings != nullptr) {
copy_dex_cache->SetFieldPtrWithSize<false>(mirror::DexCache::StringsOffset(),
NativeLocationInImage(orig_strings),
- /*pointer size*/8u);
+ PointerSize::k64);
orig_dex_cache->FixupStrings(NativeCopyLocation(orig_strings, orig_dex_cache),
ImageAddressVisitor(this));
}
@@ -2037,7 +2037,7 @@
if (orig_types != nullptr) {
copy_dex_cache->SetFieldPtrWithSize<false>(mirror::DexCache::ResolvedTypesOffset(),
NativeLocationInImage(orig_types),
- /*pointer size*/8u);
+ PointerSize::k64);
orig_dex_cache->FixupResolvedTypes(NativeCopyLocation(orig_types, orig_dex_cache),
ImageAddressVisitor(this));
}
@@ -2045,7 +2045,7 @@
if (orig_methods != nullptr) {
copy_dex_cache->SetFieldPtrWithSize<false>(mirror::DexCache::ResolvedMethodsOffset(),
NativeLocationInImage(orig_methods),
- /*pointer size*/8u);
+ PointerSize::k64);
ArtMethod** copy_methods = NativeCopyLocation(orig_methods, orig_dex_cache);
for (size_t i = 0, num = orig_dex_cache->NumResolvedMethods(); i != num; ++i) {
ArtMethod* orig = mirror::DexCache::GetElementPtrSize(orig_methods, i, target_ptr_size_);
@@ -2058,7 +2058,7 @@
if (orig_fields != nullptr) {
copy_dex_cache->SetFieldPtrWithSize<false>(mirror::DexCache::ResolvedFieldsOffset(),
NativeLocationInImage(orig_fields),
- /*pointer size*/8u);
+ PointerSize::k64);
ArtField** copy_fields = NativeCopyLocation(orig_fields, orig_dex_cache);
for (size_t i = 0, num = orig_dex_cache->NumResolvedFields(); i != num; ++i) {
ArtField* orig = mirror::DexCache::GetElementPtrSize(orig_fields, i, target_ptr_size_);
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index 1efdc22..626a975 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -28,6 +28,7 @@
#include "base/bit_utils.h"
#include "base/dchecked_vector.h"
+#include "base/enums.h"
#include "base/length_prefixed_array.h"
#include "base/macros.h"
#include "driver/compiler_driver.h"
@@ -524,7 +525,7 @@
const bool compile_app_image_;
// Size of pointers on the target architecture.
- size_t target_ptr_size_;
+ PointerSize target_ptr_size_;
// Image data indexed by the oat file index.
dchecked_vector<ImageInfo> image_infos_;
diff --git a/compiler/jni/quick/arm/calling_convention_arm.cc b/compiler/jni/quick/arm/calling_convention_arm.cc
index 29411f0..0d16260 100644
--- a/compiler/jni/quick/arm/calling_convention_arm.cc
+++ b/compiler/jni/quick/arm/calling_convention_arm.cc
@@ -22,6 +22,8 @@
namespace art {
namespace arm {
+static_assert(kArmPointerSize == PointerSize::k32, "Unexpected ARM pointer size");
+
// Used by hard float.
static const Register kHFCoreArgumentRegisters[] = {
R0, R1, R2, R3
@@ -255,7 +257,7 @@
ArmJniCallingConvention::ArmJniCallingConvention(bool is_static, bool is_synchronized,
const char* shorty)
- : JniCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {
+ : JniCallingConvention(is_static, is_synchronized, shorty, kArmPointerSize) {
// Compute padding to ensure longs and doubles are not split in AAPCS. Ignore the 'this' jobject
// or jclass for static methods and the JNIEnv. We start at the aligned register r2.
size_t padding = 0;
@@ -287,9 +289,10 @@
size_t ArmJniCallingConvention::FrameSize() {
// Method*, LR and callee save area size, local reference segment state
- size_t frame_data_size = kArmPointerSize + (2 + CalleeSaveRegisters().size()) * kFramePointerSize;
+ size_t frame_data_size = static_cast<size_t>(kArmPointerSize)
+ + (2 + CalleeSaveRegisters().size()) * kFramePointerSize;
// References plus 2 words for HandleScope header
- size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
+ size_t handle_scope_size = HandleScope::SizeOf(kArmPointerSize, ReferenceCount());
// Plus return value spill area size
return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
@@ -343,7 +346,8 @@
FrameOffset ArmJniCallingConvention::CurrentParamStackOffset() {
CHECK_GE(itr_slots_, 4u);
- size_t offset = displacement_.Int32Value() - OutArgSize() + ((itr_slots_ - 4) * kFramePointerSize);
+ size_t offset =
+ displacement_.Int32Value() - OutArgSize() + ((itr_slots_ - 4) * kFramePointerSize);
CHECK_LT(offset, OutArgSize());
return FrameOffset(offset);
}
diff --git a/compiler/jni/quick/arm/calling_convention_arm.h b/compiler/jni/quick/arm/calling_convention_arm.h
index 157880b..7c717cc 100644
--- a/compiler/jni/quick/arm/calling_convention_arm.h
+++ b/compiler/jni/quick/arm/calling_convention_arm.h
@@ -17,17 +17,21 @@
#ifndef ART_COMPILER_JNI_QUICK_ARM_CALLING_CONVENTION_ARM_H_
#define ART_COMPILER_JNI_QUICK_ARM_CALLING_CONVENTION_ARM_H_
+#include "base/enums.h"
#include "jni/quick/calling_convention.h"
namespace art {
namespace arm {
-constexpr size_t kFramePointerSize = 4;
+constexpr size_t kFramePointerSize = static_cast<size_t>(PointerSize::k32);
class ArmManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
public:
ArmManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
- : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {}
+ : ManagedRuntimeCallingConvention(is_static,
+ is_synchronized,
+ shorty,
+ PointerSize::k32) {}
~ArmManagedRuntimeCallingConvention() OVERRIDE {}
// Calling convention
ManagedRegister ReturnRegister() OVERRIDE;
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.cc b/compiler/jni/quick/arm64/calling_convention_arm64.cc
index ab56c1c..afa707d 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.cc
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.cc
@@ -22,6 +22,8 @@
namespace art {
namespace arm64 {
+static_assert(kArm64PointerSize == PointerSize::k64, "Unexpected ARM64 pointer size");
+
static const XRegister kXArgumentRegisters[] = {
X0, X1, X2, X3, X4, X5, X6, X7
};
@@ -211,7 +213,7 @@
// JNI calling convention
Arm64JniCallingConvention::Arm64JniCallingConvention(bool is_static, bool is_synchronized,
const char* shorty)
- : JniCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {
+ : JniCallingConvention(is_static, is_synchronized, shorty, kArm64PointerSize) {
}
uint32_t Arm64JniCallingConvention::CoreSpillMask() const {
@@ -231,7 +233,7 @@
size_t frame_data_size = kFramePointerSize +
CalleeSaveRegisters().size() * kFramePointerSize + sizeof(uint32_t);
// References plus 2 words for HandleScope header
- size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
+ size_t handle_scope_size = HandleScope::SizeOf(kArm64PointerSize, ReferenceCount());
// Plus return value spill area size
return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.h b/compiler/jni/quick/arm64/calling_convention_arm64.h
index 337e881..90b12e5 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.h
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.h
@@ -17,17 +17,21 @@
#ifndef ART_COMPILER_JNI_QUICK_ARM64_CALLING_CONVENTION_ARM64_H_
#define ART_COMPILER_JNI_QUICK_ARM64_CALLING_CONVENTION_ARM64_H_
+#include "base/enums.h"
#include "jni/quick/calling_convention.h"
namespace art {
namespace arm64 {
-constexpr size_t kFramePointerSize = 8;
+constexpr size_t kFramePointerSize = static_cast<size_t>(PointerSize::k64);
class Arm64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
public:
Arm64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
- : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {}
+ : ManagedRuntimeCallingConvention(is_static,
+ is_synchronized,
+ shorty,
+ PointerSize::k64) {}
~Arm64ManagedRuntimeCallingConvention() OVERRIDE {}
// Calling convention
ManagedRegister ReturnRegister() OVERRIDE;
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
index e21f554..c7ed9c9 100644
--- a/compiler/jni/quick/calling_convention.cc
+++ b/compiler/jni/quick/calling_convention.cc
@@ -299,7 +299,7 @@
size_t JniCallingConvention::CurrentParamSize() {
if (itr_args_ <= kObjectOrClass) {
- return frame_pointer_size_; // JNIEnv or jobject/jclass
+ return static_cast<size_t>(frame_pointer_size_); // JNIEnv or jobject/jclass
} else {
int arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
return ParamSize(arg_pos);
diff --git a/compiler/jni/quick/calling_convention.h b/compiler/jni/quick/calling_convention.h
index e8f738d..995fa51 100644
--- a/compiler/jni/quick/calling_convention.h
+++ b/compiler/jni/quick/calling_convention.h
@@ -18,6 +18,7 @@
#define ART_COMPILER_JNI_QUICK_CALLING_CONVENTION_H_
#include "base/arena_object.h"
+#include "base/enums.h"
#include "handle_scope.h"
#include "primitive.h"
#include "thread.h"
@@ -70,8 +71,10 @@
virtual ~CallingConvention() {}
protected:
- CallingConvention(bool is_static, bool is_synchronized, const char* shorty,
- size_t frame_pointer_size)
+ CallingConvention(bool is_static,
+ bool is_synchronized,
+ const char* shorty,
+ PointerSize frame_pointer_size)
: itr_slots_(0), itr_refs_(0), itr_args_(0), itr_longs_and_doubles_(0),
itr_float_and_doubles_(0), displacement_(0),
frame_pointer_size_(frame_pointer_size),
@@ -198,7 +201,7 @@
// Space for frames below this on the stack.
FrameOffset displacement_;
// The size of a pointer.
- const size_t frame_pointer_size_;
+ const PointerSize frame_pointer_size_;
// The size of a reference entry within the handle scope.
const size_t handle_scope_pointer_size_;
@@ -255,7 +258,7 @@
ManagedRuntimeCallingConvention(bool is_static,
bool is_synchronized,
const char* shorty,
- size_t frame_pointer_size)
+ PointerSize frame_pointer_size)
: CallingConvention(is_static, is_synchronized, shorty, frame_pointer_size) {}
};
@@ -328,7 +331,7 @@
// Position of handle scope and interior fields
FrameOffset HandleScopeOffset() const {
- return FrameOffset(this->displacement_.Int32Value() + frame_pointer_size_);
+ return FrameOffset(this->displacement_.Int32Value() + static_cast<size_t>(frame_pointer_size_));
// above Method reference
}
@@ -356,8 +359,10 @@
kObjectOrClass = 1
};
- JniCallingConvention(bool is_static, bool is_synchronized, const char* shorty,
- size_t frame_pointer_size)
+ JniCallingConvention(bool is_static,
+ bool is_synchronized,
+ const char* shorty,
+ PointerSize frame_pointer_size)
: CallingConvention(is_static, is_synchronized, shorty, frame_pointer_size) {}
// Number of stack slots for outgoing arguments, above which the handle scope is
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 4311a34..277b794 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -23,6 +23,7 @@
#include "art_method.h"
#include "base/arena_allocator.h"
+#include "base/enums.h"
#include "base/logging.h"
#include "base/macros.h"
#include "calling_convention.h"
@@ -125,16 +126,16 @@
if (is_64_bit_target) {
__ CopyRawPtrFromThread64(main_jni_conv->HandleScopeLinkOffset(),
- Thread::TopHandleScopeOffset<8>(),
+ Thread::TopHandleScopeOffset<PointerSize::k64>(),
mr_conv->InterproceduralScratchRegister());
- __ StoreStackOffsetToThread64(Thread::TopHandleScopeOffset<8>(),
+ __ StoreStackOffsetToThread64(Thread::TopHandleScopeOffset<PointerSize::k64>(),
main_jni_conv->HandleScopeOffset(),
mr_conv->InterproceduralScratchRegister());
} else {
__ CopyRawPtrFromThread32(main_jni_conv->HandleScopeLinkOffset(),
- Thread::TopHandleScopeOffset<4>(),
+ Thread::TopHandleScopeOffset<PointerSize::k32>(),
mr_conv->InterproceduralScratchRegister());
- __ StoreStackOffsetToThread32(Thread::TopHandleScopeOffset<4>(),
+ __ StoreStackOffsetToThread32(Thread::TopHandleScopeOffset<PointerSize::k32>(),
main_jni_conv->HandleScopeOffset(),
mr_conv->InterproceduralScratchRegister());
}
@@ -188,9 +189,9 @@
// 4. Write out the end of the quick frames.
if (is_64_bit_target) {
- __ StoreStackPointerToThread64(Thread::TopOfManagedStackOffset<8>());
+ __ StoreStackPointerToThread64(Thread::TopOfManagedStackOffset<PointerSize::k64>());
} else {
- __ StoreStackPointerToThread32(Thread::TopOfManagedStackOffset<4>());
+ __ StoreStackPointerToThread32(Thread::TopOfManagedStackOffset<PointerSize::k32>());
}
// 5. Move frame down to allow space for out going args.
@@ -201,8 +202,10 @@
// Call the read barrier for the declaring class loaded from the method for a static call.
// Note that we always have outgoing param space available for at least two params.
if (kUseReadBarrier && is_static) {
- ThreadOffset<4> read_barrier32 = QUICK_ENTRYPOINT_OFFSET(4, pReadBarrierJni);
- ThreadOffset<8> read_barrier64 = QUICK_ENTRYPOINT_OFFSET(8, pReadBarrierJni);
+ ThreadOffset32 read_barrier32 =
+ QUICK_ENTRYPOINT_OFFSET(PointerSize::k32, pReadBarrierJni);
+ ThreadOffset64 read_barrier64 =
+ QUICK_ENTRYPOINT_OFFSET(PointerSize::k64, pReadBarrierJni);
main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
main_jni_conv->Next(); // Skip JNIEnv.
FrameOffset class_handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
@@ -245,10 +248,14 @@
// can occur. The result is the saved JNI local state that is restored by the exit call. We
// abuse the JNI calling convention here, that is guaranteed to support passing 2 pointer
// arguments.
- ThreadOffset<4> jni_start32 = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(4, pJniMethodStartSynchronized)
- : QUICK_ENTRYPOINT_OFFSET(4, pJniMethodStart);
- ThreadOffset<8> jni_start64 = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(8, pJniMethodStartSynchronized)
- : QUICK_ENTRYPOINT_OFFSET(8, pJniMethodStart);
+ ThreadOffset32 jni_start32 =
+ is_synchronized
+ ? QUICK_ENTRYPOINT_OFFSET(PointerSize::k32, pJniMethodStartSynchronized)
+ : QUICK_ENTRYPOINT_OFFSET(PointerSize::k32, pJniMethodStart);
+ ThreadOffset64 jni_start64 =
+ is_synchronized
+ ? QUICK_ENTRYPOINT_OFFSET(PointerSize::k64, pJniMethodStartSynchronized)
+ : QUICK_ENTRYPOINT_OFFSET(PointerSize::k64, pJniMethodStart);
main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
FrameOffset locked_object_handle_scope_offset(0);
if (is_synchronized) {
@@ -346,17 +353,17 @@
ManagedRegister jni_env = main_jni_conv->CurrentParamRegister();
DCHECK(!jni_env.Equals(main_jni_conv->InterproceduralScratchRegister()));
if (is_64_bit_target) {
- __ LoadRawPtrFromThread64(jni_env, Thread::JniEnvOffset<8>());
+ __ LoadRawPtrFromThread64(jni_env, Thread::JniEnvOffset<PointerSize::k64>());
} else {
- __ LoadRawPtrFromThread32(jni_env, Thread::JniEnvOffset<4>());
+ __ LoadRawPtrFromThread32(jni_env, Thread::JniEnvOffset<PointerSize::k32>());
}
} else {
FrameOffset jni_env = main_jni_conv->CurrentParamStackOffset();
if (is_64_bit_target) {
- __ CopyRawPtrFromThread64(jni_env, Thread::JniEnvOffset<8>(),
+ __ CopyRawPtrFromThread64(jni_env, Thread::JniEnvOffset<PointerSize::k64>(),
main_jni_conv->InterproceduralScratchRegister());
} else {
- __ CopyRawPtrFromThread32(jni_env, Thread::JniEnvOffset<4>(),
+ __ CopyRawPtrFromThread32(jni_env, Thread::JniEnvOffset<PointerSize::k32>(),
main_jni_conv->InterproceduralScratchRegister());
}
}
@@ -387,7 +394,8 @@
main_jni_conv->GetReturnType() == Primitive::kPrimDouble &&
return_save_location.Uint32Value() % 8 != 0) {
// Ensure doubles are 8-byte aligned for MIPS
- return_save_location = FrameOffset(return_save_location.Uint32Value() + kMipsPointerSize);
+ return_save_location = FrameOffset(return_save_location.Uint32Value()
+ + static_cast<size_t>(kMipsPointerSize));
}
CHECK_LT(return_save_location.Uint32Value(), frame_size + main_out_arg_size);
__ Store(return_save_location, main_jni_conv->ReturnRegister(), main_jni_conv->SizeOfReturnValue());
@@ -406,21 +414,27 @@
}
// thread.
end_jni_conv->ResetIterator(FrameOffset(end_out_arg_size));
- ThreadOffset<4> jni_end32(-1);
- ThreadOffset<8> jni_end64(-1);
+ ThreadOffset32 jni_end32(-1);
+ ThreadOffset64 jni_end64(-1);
if (reference_return) {
// Pass result.
- jni_end32 = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(4, pJniMethodEndWithReferenceSynchronized)
- : QUICK_ENTRYPOINT_OFFSET(4, pJniMethodEndWithReference);
- jni_end64 = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(8, pJniMethodEndWithReferenceSynchronized)
- : QUICK_ENTRYPOINT_OFFSET(8, pJniMethodEndWithReference);
+ jni_end32 = is_synchronized
+ ? QUICK_ENTRYPOINT_OFFSET(PointerSize::k32,
+ pJniMethodEndWithReferenceSynchronized)
+ : QUICK_ENTRYPOINT_OFFSET(PointerSize::k32, pJniMethodEndWithReference);
+ jni_end64 = is_synchronized
+ ? QUICK_ENTRYPOINT_OFFSET(PointerSize::k64,
+ pJniMethodEndWithReferenceSynchronized)
+ : QUICK_ENTRYPOINT_OFFSET(PointerSize::k64, pJniMethodEndWithReference);
SetNativeParameter(jni_asm.get(), end_jni_conv.get(), end_jni_conv->ReturnRegister());
end_jni_conv->Next();
} else {
- jni_end32 = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(4, pJniMethodEndSynchronized)
- : QUICK_ENTRYPOINT_OFFSET(4, pJniMethodEnd);
- jni_end64 = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(8, pJniMethodEndSynchronized)
- : QUICK_ENTRYPOINT_OFFSET(8, pJniMethodEnd);
+ jni_end32 = is_synchronized
+ ? QUICK_ENTRYPOINT_OFFSET(PointerSize::k32, pJniMethodEndSynchronized)
+ : QUICK_ENTRYPOINT_OFFSET(PointerSize::k32, pJniMethodEnd);
+ jni_end64 = is_synchronized
+ ? QUICK_ENTRYPOINT_OFFSET(PointerSize::k64, pJniMethodEndSynchronized)
+ : QUICK_ENTRYPOINT_OFFSET(PointerSize::k64, pJniMethodEnd);
}
// Pass saved local reference state.
if (end_jni_conv->IsCurrentParamOnStack()) {
@@ -458,9 +472,11 @@
__ GetCurrentThread(end_jni_conv->CurrentParamStackOffset(),
end_jni_conv->InterproceduralScratchRegister());
if (is_64_bit_target) {
- __ CallFromThread64(ThreadOffset<8>(jni_end64), end_jni_conv->InterproceduralScratchRegister());
+ __ CallFromThread64(ThreadOffset64(jni_end64),
+ end_jni_conv->InterproceduralScratchRegister());
} else {
- __ CallFromThread32(ThreadOffset<4>(jni_end32), end_jni_conv->InterproceduralScratchRegister());
+ __ CallFromThread32(ThreadOffset32(jni_end32),
+ end_jni_conv->InterproceduralScratchRegister());
}
}
diff --git a/compiler/jni/quick/mips/calling_convention_mips.cc b/compiler/jni/quick/mips/calling_convention_mips.cc
index 3d4d140..f5ab5f7 100644
--- a/compiler/jni/quick/mips/calling_convention_mips.cc
+++ b/compiler/jni/quick/mips/calling_convention_mips.cc
@@ -172,7 +172,7 @@
MipsJniCallingConvention::MipsJniCallingConvention(bool is_static, bool is_synchronized,
const char* shorty)
- : JniCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {
+ : JniCallingConvention(is_static, is_synchronized, shorty, kMipsPointerSize) {
// Compute padding to ensure longs and doubles are not split in AAPCS. Ignore the 'this' jobject
// or jclass for static methods and the JNIEnv. We start at the aligned register A2.
size_t padding = 0;
@@ -203,10 +203,10 @@
size_t MipsJniCallingConvention::FrameSize() {
// ArtMethod*, RA and callee save area size, local reference segment state
- size_t frame_data_size = kMipsPointerSize +
+ size_t frame_data_size = static_cast<size_t>(kMipsPointerSize) +
(2 + CalleeSaveRegisters().size()) * kFramePointerSize;
// References plus 2 words for HandleScope header
- size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
+ size_t handle_scope_size = HandleScope::SizeOf(kMipsPointerSize, ReferenceCount());
// Plus return value spill area size
return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
diff --git a/compiler/jni/quick/mips/calling_convention_mips.h b/compiler/jni/quick/mips/calling_convention_mips.h
index 5c128b0..e95a738 100644
--- a/compiler/jni/quick/mips/calling_convention_mips.h
+++ b/compiler/jni/quick/mips/calling_convention_mips.h
@@ -17,17 +17,23 @@
#ifndef ART_COMPILER_JNI_QUICK_MIPS_CALLING_CONVENTION_MIPS_H_
#define ART_COMPILER_JNI_QUICK_MIPS_CALLING_CONVENTION_MIPS_H_
+#include "base/enums.h"
#include "jni/quick/calling_convention.h"
namespace art {
namespace mips {
constexpr size_t kFramePointerSize = 4;
+static_assert(kFramePointerSize == static_cast<size_t>(PointerSize::k32),
+ "Invalid frame pointer size");
class MipsManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
public:
MipsManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
- : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {}
+ : ManagedRuntimeCallingConvention(is_static,
+ is_synchronized,
+ shorty,
+ PointerSize::k32) {}
~MipsManagedRuntimeCallingConvention() OVERRIDE {}
// Calling convention
ManagedRegister ReturnRegister() OVERRIDE;
diff --git a/compiler/jni/quick/mips64/calling_convention_mips64.cc b/compiler/jni/quick/mips64/calling_convention_mips64.cc
index f2e1da8..8341e8e 100644
--- a/compiler/jni/quick/mips64/calling_convention_mips64.cc
+++ b/compiler/jni/quick/mips64/calling_convention_mips64.cc
@@ -152,7 +152,7 @@
Mips64JniCallingConvention::Mips64JniCallingConvention(bool is_static, bool is_synchronized,
const char* shorty)
- : JniCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {
+ : JniCallingConvention(is_static, is_synchronized, shorty, kMips64PointerSize) {
}
uint32_t Mips64JniCallingConvention::CoreSpillMask() const {
@@ -172,7 +172,7 @@
size_t frame_data_size = kFramePointerSize +
(CalleeSaveRegisters().size() + 1) * kFramePointerSize + sizeof(uint32_t);
// References plus 2 words for HandleScope header
- size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
+ size_t handle_scope_size = HandleScope::SizeOf(kMips64PointerSize, ReferenceCount());
// Plus return value spill area size
return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
diff --git a/compiler/jni/quick/mips64/calling_convention_mips64.h b/compiler/jni/quick/mips64/calling_convention_mips64.h
index 99ea3cd..a5fd111 100644
--- a/compiler/jni/quick/mips64/calling_convention_mips64.h
+++ b/compiler/jni/quick/mips64/calling_convention_mips64.h
@@ -17,17 +17,23 @@
#ifndef ART_COMPILER_JNI_QUICK_MIPS64_CALLING_CONVENTION_MIPS64_H_
#define ART_COMPILER_JNI_QUICK_MIPS64_CALLING_CONVENTION_MIPS64_H_
+#include "base/enums.h"
#include "jni/quick/calling_convention.h"
namespace art {
namespace mips64 {
constexpr size_t kFramePointerSize = 8;
+static_assert(kFramePointerSize == static_cast<size_t>(PointerSize::k64),
+ "Invalid frame pointer size");
class Mips64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
public:
Mips64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
- : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {}
+ : ManagedRuntimeCallingConvention(is_static,
+ is_synchronized,
+ shorty,
+ PointerSize::k64) {}
~Mips64ManagedRuntimeCallingConvention() OVERRIDE {}
// Calling convention
ManagedRegister ReturnRegister() OVERRIDE;
diff --git a/compiler/jni/quick/x86/calling_convention_x86.cc b/compiler/jni/quick/x86/calling_convention_x86.cc
index 22c7cd0..1d06f26 100644
--- a/compiler/jni/quick/x86/calling_convention_x86.cc
+++ b/compiler/jni/quick/x86/calling_convention_x86.cc
@@ -23,6 +23,8 @@
namespace art {
namespace x86 {
+static_assert(kX86PointerSize == PointerSize::k32, "Unexpected x86 pointer size");
+
static constexpr ManagedRegister kCalleeSaveRegisters[] = {
// Core registers.
X86ManagedRegister::FromCpuRegister(EBP),
@@ -190,7 +192,7 @@
X86JniCallingConvention::X86JniCallingConvention(bool is_static, bool is_synchronized,
const char* shorty)
- : JniCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {
+ : JniCallingConvention(is_static, is_synchronized, shorty, kX86PointerSize) {
}
uint32_t X86JniCallingConvention::CoreSpillMask() const {
@@ -203,10 +205,10 @@
size_t X86JniCallingConvention::FrameSize() {
// Method*, return address and callee save area size, local reference segment state
- size_t frame_data_size = kX86PointerSize +
+ size_t frame_data_size = static_cast<size_t>(kX86PointerSize) +
(2 + CalleeSaveRegisters().size()) * kFramePointerSize;
// References plus 2 words for HandleScope header
- size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
+ size_t handle_scope_size = HandleScope::SizeOf(kX86PointerSize, ReferenceCount());
// Plus return value spill area size
return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
diff --git a/compiler/jni/quick/x86/calling_convention_x86.h b/compiler/jni/quick/x86/calling_convention_x86.h
index 9d678b7..ff92fc9 100644
--- a/compiler/jni/quick/x86/calling_convention_x86.h
+++ b/compiler/jni/quick/x86/calling_convention_x86.h
@@ -17,17 +17,21 @@
#ifndef ART_COMPILER_JNI_QUICK_X86_CALLING_CONVENTION_X86_H_
#define ART_COMPILER_JNI_QUICK_X86_CALLING_CONVENTION_X86_H_
+#include "base/enums.h"
#include "jni/quick/calling_convention.h"
namespace art {
namespace x86 {
-constexpr size_t kFramePointerSize = 4;
+constexpr size_t kFramePointerSize = static_cast<size_t>(PointerSize::k32);
class X86ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
public:
X86ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
- : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize),
+ : ManagedRuntimeCallingConvention(is_static,
+ is_synchronized,
+ shorty,
+ PointerSize::k32),
gpr_arg_count_(0) {}
~X86ManagedRuntimeCallingConvention() OVERRIDE {}
// Calling convention
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
index cc4d232..cbf10bd 100644
--- a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
@@ -24,6 +24,10 @@
namespace art {
namespace x86_64 {
+constexpr size_t kFramePointerSize = static_cast<size_t>(PointerSize::k64);
+
+static_assert(kX86_64PointerSize == PointerSize::k64, "Unexpected x86_64 pointer size");
+
static constexpr ManagedRegister kCalleeSaveRegisters[] = {
// Core registers.
X86_64ManagedRegister::FromCpuRegister(RBX),
@@ -136,7 +140,7 @@
FrameOffset X86_64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
return FrameOffset(displacement_.Int32Value() + // displacement
- kX86_64PointerSize + // Method ref
+ static_cast<size_t>(kX86_64PointerSize) + // Method ref
itr_slots_ * sizeof(uint32_t)); // offset into in args
}
@@ -163,7 +167,7 @@
X86_64JniCallingConvention::X86_64JniCallingConvention(bool is_static, bool is_synchronized,
const char* shorty)
- : JniCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {
+ : JniCallingConvention(is_static, is_synchronized, shorty, kX86_64PointerSize) {
}
uint32_t X86_64JniCallingConvention::CoreSpillMask() const {
@@ -176,10 +180,10 @@
size_t X86_64JniCallingConvention::FrameSize() {
// Method*, return address and callee save area size, local reference segment state
- size_t frame_data_size = kX86_64PointerSize +
+ size_t frame_data_size = static_cast<size_t>(kX86_64PointerSize) +
(2 + CalleeSaveRegisters().size()) * kFramePointerSize;
// References plus link_ (pointer) and number_of_references_ (uint32_t) for HandleScope header
- size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
+ size_t handle_scope_size = HandleScope::SizeOf(kX86_64PointerSize, ReferenceCount());
// Plus return value spill area size
return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.h b/compiler/jni/quick/x86_64/calling_convention_x86_64.h
index e2d3d48..b98f505 100644
--- a/compiler/jni/quick/x86_64/calling_convention_x86_64.h
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.h
@@ -17,17 +17,19 @@
#ifndef ART_COMPILER_JNI_QUICK_X86_64_CALLING_CONVENTION_X86_64_H_
#define ART_COMPILER_JNI_QUICK_X86_64_CALLING_CONVENTION_X86_64_H_
+#include "base/enums.h"
#include "jni/quick/calling_convention.h"
namespace art {
namespace x86_64 {
-constexpr size_t kFramePointerSize = 8;
-
class X86_64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
public:
X86_64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
- : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {}
+ : ManagedRuntimeCallingConvention(is_static,
+ is_synchronized,
+ shorty,
+ PointerSize::k64) {}
~X86_64ManagedRuntimeCallingConvention() OVERRIDE {}
// Calling convention
ManagedRegister ReturnRegister() OVERRIDE;
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 0762eec..ce044e8 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -16,6 +16,7 @@
#include "arch/instruction_set_features.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
#include "common_compiler_test.h"
@@ -444,7 +445,8 @@
EXPECT_EQ(72U, sizeof(OatHeader));
EXPECT_EQ(4U, sizeof(OatMethodOffsets));
EXPECT_EQ(20U, sizeof(OatQuickMethodHeader));
- EXPECT_EQ(162 * GetInstructionSetPointerSize(kRuntimeISA), sizeof(QuickEntryPoints));
+ EXPECT_EQ(162 * static_cast<size_t>(GetInstructionSetPointerSize(kRuntimeISA)),
+ sizeof(QuickEntryPoints));
}
TEST_F(OatTest, OatHeaderIsValid) {
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index b32199f..f20c715 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -23,6 +23,7 @@
#include "art_method-inl.h"
#include "base/allocator.h"
#include "base/bit_vector.h"
+#include "base/enums.h"
#include "base/file_magic.h"
#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
@@ -506,7 +507,7 @@
if (!HasBootImage()) {
// Allocate space for app dex cache arrays in the .bss section.
size_t bss_start = RoundUp(size_, kPageSize);
- size_t pointer_size = GetInstructionSetPointerSize(instruction_set);
+ PointerSize pointer_size = GetInstructionSetPointerSize(instruction_set);
bss_size_ = 0u;
for (const DexFile* dex_file : *dex_files_) {
dex_cache_arrays_offsets_.Put(dex_file, bss_start + bss_size_);
@@ -941,7 +942,7 @@
}
protected:
- const size_t pointer_size_;
+ const PointerSize pointer_size_;
};
class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
@@ -1149,7 +1150,8 @@
if (UNLIKELY(target_offset == 0)) {
ArtMethod* target = GetTargetMethod(patch);
DCHECK(target != nullptr);
- size_t size = GetInstructionSetPointerSize(writer_->compiler_driver_->GetInstructionSet());
+ PointerSize size =
+ GetInstructionSetPointerSize(writer_->compiler_driver_->GetInstructionSet());
const void* oat_code_offset = target->GetEntryPointFromQuickCompiledCodePtrSize(size);
if (oat_code_offset != 0) {
DCHECK(!writer_->HasBootImage());
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 3269dc6..4a4b98c 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -137,7 +137,7 @@
size_t CodeGenerator::GetCachePointerOffset(uint32_t index) {
auto pointer_size = InstructionSetPointerSize(GetInstructionSet());
- return pointer_size * index;
+ return static_cast<size_t>(pointer_size) * index;
}
uint32_t CodeGenerator::GetArrayLengthOffset(HArrayLength* array_length) {
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 62dd1cc..ad02ecf 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -22,6 +22,7 @@
#include "base/arena_containers.h"
#include "base/arena_object.h"
#include "base/bit_field.h"
+#include "base/enums.h"
#include "compiled_method.h"
#include "driver/compiler_options.h"
#include "globals.h"
@@ -191,7 +192,7 @@
size_t GetStackSlotOfParameter(HParameterValue* parameter) const {
// Note that this follows the current calling convention.
return GetFrameSize()
- + InstructionSetPointerSize(GetInstructionSet()) // Art method
+ + static_cast<size_t>(InstructionSetPointerSize(GetInstructionSet())) // Art method
+ parameter->GetIndex() * kVRegSize;
}
@@ -357,14 +358,14 @@
static uint32_t GetArrayDataOffset(HArrayGet* array_get);
// Return the entry point offset for ReadBarrierMarkRegX, where X is `reg`.
- template <size_t pointer_size>
+ template <PointerSize pointer_size>
static int32_t GetReadBarrierMarkEntryPointsOffset(size_t reg) {
// The entry point list defines 30 ReadBarrierMarkRegX entry points.
DCHECK_LT(reg, 30u);
// The ReadBarrierMarkRegX entry points are ordered by increasing
// register number in Thread::tls_Ptr_.quick_entrypoints.
return QUICK_ENTRYPOINT_OFFSET(pointer_size, pReadBarrierMarkReg00).Int32Value()
- + pointer_size * reg;
+ + static_cast<size_t>(pointer_size) * reg;
}
void EmitParallelMoves(Location from1,
@@ -700,7 +701,7 @@
size_t number_of_registers,
const F* fpu_registers,
size_t number_of_fpu_registers,
- size_t pointer_size)
+ PointerSize pointer_size)
: registers_(registers),
number_of_registers_(number_of_registers),
fpu_registers_(fpu_registers),
@@ -723,7 +724,7 @@
size_t GetStackOffsetOf(size_t index) const {
// We still reserve the space for parameters passed by registers.
// Add space for the method pointer.
- return pointer_size_ + index * kVRegSize;
+ return static_cast<size_t>(pointer_size_) + index * kVRegSize;
}
private:
@@ -731,7 +732,7 @@
const size_t number_of_registers_;
const F* fpu_registers_;
const size_t number_of_fpu_registers_;
- const size_t pointer_size_;
+ const PointerSize pointer_size_;
DISALLOW_COPY_AND_ASSIGN(CallingConvention);
};
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 124a61f..c18b793 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -61,7 +61,7 @@
// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
#define __ down_cast<ArmAssembler*>(codegen->GetAssembler())-> // NOLINT
-#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArmWordSize, x).Int32Value()
+#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArmPointerSize, x).Int32Value()
class NullCheckSlowPathARM : public SlowPathCode {
public:
@@ -459,7 +459,7 @@
// rX <- ReadBarrierMarkRegX(rX)
//
int32_t entry_point_offset =
- CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kArmWordSize>(reg);
+ CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kArmPointerSize>(reg);
// This runtime call does not require a stack map.
arm_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction_, this);
__ b(GetExitLabel());
@@ -966,7 +966,7 @@
if (fpu_spill_mask_ != 0) {
SRegister start_register = SRegister(LeastSignificantBit(fpu_spill_mask_));
__ vpops(start_register, POPCOUNT(fpu_spill_mask_));
- __ cfi().AdjustCFAOffset(-kArmPointerSize * POPCOUNT(fpu_spill_mask_));
+ __ cfi().AdjustCFAOffset(-static_cast<int>(kArmPointerSize) * POPCOUNT(fpu_spill_mask_));
__ cfi().RestoreMany(DWARFReg(SRegister(0)), fpu_spill_mask_);
}
// Pop LR into PC to return.
@@ -1218,7 +1218,7 @@
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
- InvokeRuntime(GetThreadOffset<kArmWordSize>(entrypoint).Int32Value(),
+ InvokeRuntime(GetThreadOffset<kArmPointerSize>(entrypoint).Int32Value(),
instruction,
dex_pc,
slow_path);
@@ -1939,7 +1939,7 @@
// temp = temp->GetImtEntryAt(method_offset);
__ LoadFromOffset(kLoadWord, temp, temp, method_offset);
uint32_t entry_point =
- ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmWordSize).Int32Value();
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmPointerSize).Int32Value();
// LR = temp->GetEntryPoint();
__ LoadFromOffset(kLoadWord, LR, temp, entry_point);
// LR();
@@ -3530,7 +3530,7 @@
if (instruction->IsStringAlloc()) {
// String is allocated through StringFactory. Call NewEmptyString entry point.
Register temp = instruction->GetLocations()->GetTemp(0).AsRegister<Register>();
- MemberOffset code_offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmWordSize);
+ MemberOffset code_offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmPointerSize);
__ LoadFromOffset(kLoadWord, temp, TR, QUICK_ENTRY_POINT(pNewEmptyString));
__ LoadFromOffset(kLoadWord, LR, temp, code_offset.Int32Value());
__ blx(LR);
@@ -4945,7 +4945,7 @@
if (can_be_null) {
__ CompareAndBranchIfZero(value, &is_null);
}
- __ LoadFromOffset(kLoadWord, card, TR, Thread::CardTableOffset<kArmWordSize>().Int32Value());
+ __ LoadFromOffset(kLoadWord, card, TR, Thread::CardTableOffset<kArmPointerSize>().Int32Value());
__ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
__ strb(card, Address(card, temp));
if (can_be_null) {
@@ -4996,7 +4996,7 @@
}
__ LoadFromOffset(
- kLoadUnsignedHalfword, IP, TR, Thread::ThreadFlagsOffset<kArmWordSize>().Int32Value());
+ kLoadUnsignedHalfword, IP, TR, Thread::ThreadFlagsOffset<kArmPointerSize>().Int32Value());
if (successor == nullptr) {
__ CompareAndBranchIfNonZero(IP, slow_path->GetEntryLabel());
__ Bind(slow_path->GetReturnLabel());
@@ -5577,7 +5577,7 @@
}
static int32_t GetExceptionTlsOffset() {
- return Thread::ExceptionOffset<kArmWordSize>().Int32Value();
+ return Thread::ExceptionOffset<kArmPointerSize>().Int32Value();
}
void LocationsBuilderARM::VisitLoadException(HLoadException* load) {
@@ -6332,7 +6332,7 @@
// IP = Thread::Current()->GetIsGcMarking()
__ LoadFromOffset(
- kLoadWord, IP, TR, Thread::IsGcMarkingOffset<kArmWordSize>().Int32Value());
+ kLoadWord, IP, TR, Thread::IsGcMarkingOffset<kArmPointerSize>().Int32Value());
__ CompareAndBranchIfNonZero(IP, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
} else {
@@ -6691,7 +6691,7 @@
// LR = callee_method->entry_point_from_quick_compiled_code_
__ LoadFromOffset(
kLoadWord, LR, callee_method.AsRegister<Register>(),
- ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmWordSize).Int32Value());
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmPointerSize).Int32Value());
// LR()
__ blx(LR);
break;
@@ -6725,7 +6725,7 @@
__ MaybeUnpoisonHeapReference(temp);
// temp = temp->GetMethodAt(method_offset);
uint32_t entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- kArmWordSize).Int32Value();
+ kArmPointerSize).Int32Value();
__ LoadFromOffset(kLoadWord, temp, temp, method_offset);
// LR = temp->GetEntryPoint();
__ LoadFromOffset(kLoadWord, LR, temp, entry_point);
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index a07a233..f9fcabd 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -17,6 +17,7 @@
#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM_H_
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM_H_
+#include "base/enums.h"
#include "code_generator.h"
#include "driver/compiler_options.h"
#include "nodes.h"
@@ -31,7 +32,7 @@
class CodeGeneratorARM;
// Use a local definition to prevent copying mistakes.
-static constexpr size_t kArmWordSize = kArmPointerSize;
+static constexpr size_t kArmWordSize = static_cast<size_t>(kArmPointerSize);
static constexpr size_t kArmBitsPerWord = kArmWordSize * kBitsPerByte;
static constexpr Register kParameterCoreRegisters[] = { R1, R2, R3 };
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index efeef7b..115cee6 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -133,7 +133,7 @@
// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
#define __ down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler()-> // NOLINT
-#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, x).Int32Value()
+#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArm64PointerSize, x).Int32Value()
// Calculate memory accessing operand for save/restore live registers.
static void SaveRestoreLiveRegistersHelper(CodeGenerator* codegen,
@@ -625,7 +625,7 @@
// rX <- ReadBarrierMarkRegX(rX)
//
int32_t entry_point_offset =
- CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kArm64WordSize>(obj_.reg());
+ CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kArm64PointerSize>(obj_.reg());
// This runtime call does not require a stack map.
arm64_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction_, this);
__ B(GetExitLabel());
@@ -1105,7 +1105,7 @@
if (value_can_be_null) {
__ Cbz(value, &done);
}
- __ Ldr(card, MemOperand(tr, Thread::CardTableOffset<kArm64WordSize>().Int32Value()));
+ __ Ldr(card, MemOperand(tr, Thread::CardTableOffset<kArm64PointerSize>().Int32Value()));
__ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
__ Strb(card, MemOperand(card, temp.X()));
if (value_can_be_null) {
@@ -1479,7 +1479,7 @@
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
- InvokeRuntime(GetThreadOffset<kArm64WordSize>(entrypoint).Int32Value(),
+ InvokeRuntime(GetThreadOffset<kArm64PointerSize>(entrypoint).Int32Value(),
instruction,
dex_pc,
slow_path);
@@ -1562,7 +1562,7 @@
UseScratchRegisterScope temps(codegen_->GetVIXLAssembler());
Register temp = temps.AcquireW();
- __ Ldrh(temp, MemOperand(tr, Thread::ThreadFlagsOffset<kArm64WordSize>().SizeValue()));
+ __ Ldrh(temp, MemOperand(tr, Thread::ThreadFlagsOffset<kArm64PointerSize>().SizeValue()));
if (successor == nullptr) {
__ Cbnz(temp, slow_path->GetEntryLabel());
__ Bind(slow_path->GetReturnLabel());
@@ -3526,7 +3526,7 @@
Register temp = XRegisterFrom(locations->GetTemp(0));
Location receiver = locations->InAt(0);
Offset class_offset = mirror::Object::ClassOffset();
- Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
+ Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64PointerSize);
// The register ip1 is required to be used for the hidden argument in
// art_quick_imt_conflict_trampoline, so prevent VIXL from using it.
@@ -3678,7 +3678,7 @@
// /* ArtMethod*[] */ temp = temp.ptr_sized_fields_->dex_cache_resolved_methods_;
__ Ldr(reg.X(),
MemOperand(method_reg.X(),
- ArtMethod::DexCacheResolvedMethodsOffset(kArm64WordSize).Int32Value()));
+ ArtMethod::DexCacheResolvedMethodsOffset(kArm64PointerSize).Int32Value()));
// temp = temp[index_in_cache];
// Note: Don't use invoke->GetTargetMethod() as it may point to a different dex file.
uint32_t index_in_cache = invoke->GetDexMethodIndex();
@@ -3710,7 +3710,7 @@
// LR = callee_method->entry_point_from_quick_compiled_code_;
__ Ldr(lr, MemOperand(
XRegisterFrom(callee_method),
- ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize).Int32Value()));
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64PointerSize).Int32Value()));
// lr()
__ Blr(lr);
break;
@@ -3730,7 +3730,7 @@
size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
invoke->GetVTableIndex(), kArm64PointerSize).SizeValue();
Offset class_offset = mirror::Object::ClassOffset();
- Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
+ Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64PointerSize);
BlockPoolsScope block_pools(GetVIXLAssembler());
@@ -4127,7 +4127,7 @@
}
static MemOperand GetExceptionTlsAddress() {
- return MemOperand(tr, Thread::ExceptionOffset<kArm64WordSize>().Int32Value());
+ return MemOperand(tr, Thread::ExceptionOffset<kArm64PointerSize>().Int32Value());
}
void LocationsBuilderARM64::VisitLoadException(HLoadException* load) {
@@ -4440,7 +4440,7 @@
if (instruction->IsStringAlloc()) {
// String is allocated through StringFactory. Call NewEmptyString entry point.
Location temp = instruction->GetLocations()->GetTemp(0);
- MemberOffset code_offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
+ MemberOffset code_offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64PointerSize);
__ Ldr(XRegisterFrom(temp), MemOperand(tr, QUICK_ENTRY_POINT(pNewEmptyString)));
__ Ldr(lr, MemOperand(XRegisterFrom(temp), code_offset.Int32Value()));
__ Blr(lr);
@@ -5096,7 +5096,7 @@
UseScratchRegisterScope temps(masm);
Register temp = temps.AcquireW();
// temp = Thread::Current()->GetIsGcMarking()
- __ Ldr(temp, MemOperand(tr, Thread::IsGcMarkingOffset<kArm64WordSize>().Int32Value()));
+ __ Ldr(temp, MemOperand(tr, Thread::IsGcMarkingOffset<kArm64PointerSize>().Int32Value()));
__ Cbnz(temp, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
} else {
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 03f5a33..240936c 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -40,7 +40,7 @@
class CodeGeneratorARM64;
// Use a local definition to prevent copying mistakes.
-static constexpr size_t kArm64WordSize = kArm64PointerSize;
+static constexpr size_t kArm64WordSize = static_cast<size_t>(kArm64PointerSize);
static const vixl::aarch64::Register kParameterCoreRegisters[] = {
vixl::aarch64::x1,
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 334d30d..8dd82ef 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -147,7 +147,7 @@
// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
#define __ down_cast<CodeGeneratorMIPS*>(codegen)->GetAssembler()-> // NOLINT
-#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, x).Int32Value()
+#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, x).Int32Value()
class BoundsCheckSlowPathMIPS : public SlowPathCodeMIPS {
public:
@@ -505,7 +505,7 @@
#undef __
// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
#define __ down_cast<MipsAssembler*>(GetAssembler())-> // NOLINT
-#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, x).Int32Value()
+#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, x).Int32Value()
void CodeGeneratorMIPS::Finalize(CodeAllocator* allocator) {
// Ensure that we fix up branches.
@@ -1147,7 +1147,7 @@
__ LoadFromOffset(kLoadWord,
card,
TR,
- Thread::CardTableOffset<kMipsWordSize>().Int32Value());
+ Thread::CardTableOffset<kMipsPointerSize>().Int32Value());
__ Srl(temp, object, gc::accounting::CardTable::kCardShift);
__ Addu(temp, card, temp);
__ Sb(card, temp, 0);
@@ -1239,7 +1239,7 @@
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
- InvokeRuntime(GetThreadOffset<kMipsWordSize>(entrypoint).Int32Value(),
+ InvokeRuntime(GetThreadOffset<kMipsPointerSize>(entrypoint).Int32Value(),
instruction,
dex_pc,
slow_path,
@@ -1290,7 +1290,7 @@
__ LoadFromOffset(kLoadUnsignedHalfword,
TMP,
TR,
- Thread::ThreadFlagsOffset<kMipsWordSize>().Int32Value());
+ Thread::ThreadFlagsOffset<kMipsPointerSize>().Int32Value());
if (successor == nullptr) {
__ Bnez(TMP, slow_path->GetEntryLabel());
__ Bind(slow_path->GetReturnLabel());
@@ -3949,7 +3949,7 @@
Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
Location receiver = invoke->GetLocations()->InAt(0);
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
- Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsWordSize);
+ Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsPointerSize);
// Set the hidden argument.
__ LoadConst32(invoke->GetLocations()->GetTemp(1).AsRegister<Register>(),
@@ -4287,7 +4287,7 @@
T9,
callee_method.AsRegister<Register>(),
ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- kMipsWordSize).Int32Value());
+ kMipsPointerSize).Int32Value());
// T9()
__ Jalr(T9);
__ Nop();
@@ -4320,7 +4320,7 @@
size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
invoke->GetVTableIndex(), kMipsPointerSize).SizeValue();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
- Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsWordSize);
+ Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsPointerSize);
// temp = object->GetClass();
DCHECK(receiver.IsRegister());
@@ -4520,7 +4520,7 @@
}
static int32_t GetExceptionTlsOffset() {
- return Thread::ExceptionOffset<kMipsWordSize>().Int32Value();
+ return Thread::ExceptionOffset<kMipsPointerSize>().Int32Value();
}
void LocationsBuilderMIPS::VisitLoadException(HLoadException* load) {
@@ -4883,7 +4883,7 @@
// Move an uint16_t value to a register.
__ LoadConst32(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
codegen_->InvokeRuntime(
- GetThreadOffset<kMipsWordSize>(instruction->GetEntrypoint()).Int32Value(),
+ GetThreadOffset<kMipsPointerSize>(instruction->GetEntrypoint()).Int32Value(),
instruction,
instruction->GetDexPc(),
nullptr,
@@ -4909,7 +4909,7 @@
if (instruction->IsStringAlloc()) {
// String is allocated through StringFactory. Call NewEmptyString entry point.
Register temp = instruction->GetLocations()->GetTemp(0).AsRegister<Register>();
- MemberOffset code_offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsWordSize);
+ MemberOffset code_offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsPointerSize);
__ LoadFromOffset(kLoadWord, temp, TR, QUICK_ENTRY_POINT(pNewEmptyString));
__ LoadFromOffset(kLoadWord, T9, temp, code_offset.Int32Value());
__ Jalr(T9);
@@ -4917,7 +4917,7 @@
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
codegen_->InvokeRuntime(
- GetThreadOffset<kMipsWordSize>(instruction->GetEntrypoint()).Int32Value(),
+ GetThreadOffset<kMipsPointerSize>(instruction->GetEntrypoint()).Int32Value(),
instruction,
instruction->GetDexPc(),
nullptr,
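The kMipsWordSize → kMipsPointerSize swaps above rely on the offset templates (e.g. Thread::CardTableOffset<>) now taking the enum as their non-type parameter. A minimal sketch of that machinery, assuming base/enums.h defines PointerSize roughly as below; SlotOffset is a hypothetical stand-in for the real helpers:

  #include <cstddef>

  enum class PointerSize : size_t {
    k32 = 4,
    k64 = 8,
  };

  constexpr PointerSize kMipsPointerSize = PointerSize::k32;

  // Scoped enums are valid non-type template parameters, so call sites keep
  // the familiar Foo<kMipsPointerSize>() spelling.
  template <PointerSize kPtrSize>
  constexpr size_t SlotOffset(size_t index) {
    return index * static_cast<size_t>(kPtrSize);
  }

  static_assert(SlotOffset<kMipsPointerSize>(3) == 12, "3 slots of 4 bytes each");
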
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 29b8c20..3472830 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -104,7 +104,7 @@
// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
#define __ down_cast<CodeGeneratorMIPS64*>(codegen)->GetAssembler()-> // NOLINT
-#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64DoublewordSize, x).Int32Value()
+#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64PointerSize, x).Int32Value()
class BoundsCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
public:
@@ -431,7 +431,7 @@
#undef __
// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
#define __ down_cast<Mips64Assembler*>(GetAssembler())-> // NOLINT
-#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64DoublewordSize, x).Int32Value()
+#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64PointerSize, x).Int32Value()
void CodeGeneratorMIPS64::Finalize(CodeAllocator* allocator) {
// Ensure that we fix up branches.
@@ -888,7 +888,7 @@
__ LoadFromOffset(kLoadDoubleword,
card,
TR,
- Thread::CardTableOffset<kMips64DoublewordSize>().Int32Value());
+ Thread::CardTableOffset<kMips64PointerSize>().Int32Value());
__ Dsrl(temp, object, gc::accounting::CardTable::kCardShift);
__ Daddu(temp, card, temp);
__ Sb(card, temp, 0);
@@ -964,7 +964,7 @@
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
- InvokeRuntime(GetThreadOffset<kMips64DoublewordSize>(entrypoint).Int32Value(),
+ InvokeRuntime(GetThreadOffset<kMips64PointerSize>(entrypoint).Int32Value(),
instruction,
dex_pc,
slow_path);
@@ -1004,7 +1004,7 @@
__ LoadFromOffset(kLoadUnsignedHalfword,
TMP,
TR,
- Thread::ThreadFlagsOffset<kMips64DoublewordSize>().Int32Value());
+ Thread::ThreadFlagsOffset<kMips64PointerSize>().Int32Value());
if (successor == nullptr) {
__ Bnezc(TMP, slow_path->GetEntryLabel());
__ Bind(slow_path->GetReturnLabel());
@@ -2934,7 +2934,7 @@
GpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<GpuRegister>();
Location receiver = invoke->GetLocations()->InAt(0);
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
- Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64DoublewordSize);
+ Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64PointerSize);
// Set the hidden argument.
__ LoadConst32(invoke->GetLocations()->GetTemp(1).AsRegister<GpuRegister>(),
@@ -3115,7 +3115,7 @@
T9,
callee_method.AsRegister<GpuRegister>(),
ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- kMips64DoublewordSize).Int32Value());
+ kMips64PointerSize).Int32Value());
// T9()
__ Jalr(T9);
__ Nop();
@@ -3153,7 +3153,7 @@
size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
invoke->GetVTableIndex(), kMips64PointerSize).SizeValue();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
- Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64DoublewordSize);
+ Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64PointerSize);
// temp = object->GetClass();
__ LoadFromOffset(kLoadUnsignedWord, temp, receiver, class_offset);
@@ -3231,7 +3231,7 @@
}
static int32_t GetExceptionTlsOffset() {
- return Thread::ExceptionOffset<kMips64DoublewordSize>().Int32Value();
+ return Thread::ExceptionOffset<kMips64PointerSize>().Int32Value();
}
void LocationsBuilderMIPS64::VisitLoadException(HLoadException* load) {
@@ -3456,7 +3456,7 @@
// String is allocated through StringFactory. Call NewEmptyString entry point.
GpuRegister temp = instruction->GetLocations()->GetTemp(0).AsRegister<GpuRegister>();
MemberOffset code_offset =
- ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64DoublewordSize);
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64PointerSize);
__ LoadFromOffset(kLoadDoubleword, temp, TR, QUICK_ENTRY_POINT(pNewEmptyString));
__ LoadFromOffset(kLoadDoubleword, T9, temp, code_offset.Int32Value());
__ Jalr(T9);
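QUICK_ENTRYPOINT_OFFSET itself presumably forwards its first argument straight into a Thread:: template, which is why every call site in these files could switch from kMips64DoublewordSize to kMips64PointerSize without the macro changing. A guess at its shape, not the verified definition:

  #define QUICK_ENTRYPOINT_OFFSET(ptr_size, entrypoint) \
    Thread::QuickEntryPointOffset<ptr_size>(            \
        OFFSETOF_MEMBER(QuickEntryPoints, entrypoint))
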
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 528e94f..a2fa245 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -49,7 +49,7 @@
// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
#define __ down_cast<X86Assembler*>(codegen->GetAssembler())-> // NOLINT
-#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kX86WordSize, x).Int32Value()
+#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kX86PointerSize, x).Int32Value()
class NullCheckSlowPathX86 : public SlowPathCode {
public:
@@ -492,7 +492,7 @@
// rX <- ReadBarrierMarkRegX(rX)
//
int32_t entry_point_offset =
- CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kX86WordSize>(reg);
+ CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kX86PointerSize>(reg);
// This runtime call does not require a stack map.
x86_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction_, this);
__ jmp(GetExitLabel());
@@ -803,7 +803,7 @@
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
- InvokeRuntime(GetThreadOffset<kX86WordSize>(entrypoint).Int32Value(),
+ InvokeRuntime(GetThreadOffset<kX86PointerSize>(entrypoint).Int32Value(),
instruction,
dex_pc,
slow_path);
@@ -2094,7 +2094,7 @@
__ movl(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
__ call(Address(temp,
- ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86PointerSize).Int32Value()));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -4034,7 +4034,7 @@
if (instruction->IsStringAlloc()) {
// String is allocated through StringFactory. Call NewEmptyString entry point.
Register temp = instruction->GetLocations()->GetTemp(0).AsRegister<Register>();
- MemberOffset code_offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize);
+ MemberOffset code_offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86PointerSize);
__ fs()->movl(temp, Address::Absolute(QUICK_ENTRY_POINT(pNewEmptyString)));
__ call(Address(temp, code_offset.Int32Value()));
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
@@ -4451,7 +4451,7 @@
// (callee_method + offset_of_quick_compiled_code)()
__ call(Address(callee_method.AsRegister<Register>(),
ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- kX86WordSize).Int32Value()));
+ kX86PointerSize).Int32Value()));
break;
}
@@ -4485,7 +4485,7 @@
__ movl(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
__ call(Address(
- temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
+ temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86PointerSize).Int32Value()));
}
void CodeGeneratorX86::RecordSimplePatch() {
@@ -4589,7 +4589,7 @@
__ testl(value, value);
__ j(kEqual, &is_null);
}
- __ fs()->movl(card, Address::Absolute(Thread::CardTableOffset<kX86WordSize>().Int32Value()));
+ __ fs()->movl(card, Address::Absolute(Thread::CardTableOffset<kX86PointerSize>().Int32Value()));
__ movl(temp, object);
__ shrl(temp, Immediate(gc::accounting::CardTable::kCardShift));
__ movb(Address(temp, card, TIMES_1, 0),
@@ -5681,7 +5681,7 @@
DCHECK_EQ(slow_path->GetSuccessor(), successor);
}
- __ fs()->cmpw(Address::Absolute(Thread::ThreadFlagsOffset<kX86WordSize>().Int32Value()),
+ __ fs()->cmpw(Address::Absolute(Thread::ThreadFlagsOffset<kX86PointerSize>().Int32Value()),
Immediate(0));
if (successor == nullptr) {
__ j(kNotEqual, slow_path->GetEntryLabel());
@@ -6277,7 +6277,7 @@
}
static Address GetExceptionTlsAddress() {
- return Address::Absolute(Thread::ExceptionOffset<kX86WordSize>().Int32Value());
+ return Address::Absolute(Thread::ExceptionOffset<kX86PointerSize>().Int32Value());
}
void LocationsBuilderX86::VisitLoadException(HLoadException* load) {
@@ -6994,7 +6994,7 @@
new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathX86(instruction, root);
codegen_->AddSlowPath(slow_path);
- __ fs()->cmpl(Address::Absolute(Thread::IsGcMarkingOffset<kX86WordSize>().Int32Value()),
+ __ fs()->cmpl(Address::Absolute(Thread::IsGcMarkingOffset<kX86PointerSize>().Int32Value()),
Immediate(0));
__ j(kNotEqual, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 39ea7d5..f306b33 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -18,6 +18,7 @@
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_X86_H_
#include "arch/x86/instruction_set_features_x86.h"
+#include "base/enums.h"
#include "code_generator.h"
#include "driver/compiler_options.h"
#include "nodes.h"
@@ -28,7 +29,7 @@
namespace x86 {
// Use a local definition to prevent copying mistakes.
-static constexpr size_t kX86WordSize = kX86PointerSize;
+static constexpr size_t kX86WordSize = static_cast<size_t>(kX86PointerSize);
class CodeGeneratorX86;
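The static_cast in the new kX86WordSize initializer is forced by the type change: a scoped enum never converts implicitly to its underlying integer, so the old one-liner stops compiling. Illustrated with the assumed PointerSize definition (the Sketch-suffixed name is hypothetical):

  #include <cstddef>

  enum class PointerSize : size_t { k32 = 4, k64 = 8 };
  constexpr PointerSize kX86PointerSizeSketch = PointerSize::k32;

  // constexpr size_t kWordSize = kX86PointerSizeSketch;  // error: no implicit conversion
  constexpr size_t kWordSize = static_cast<size_t>(kX86PointerSizeSketch);  // OK: 4
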
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 0f0129b..5d5fa85 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -53,7 +53,7 @@
// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
#define __ down_cast<X86_64Assembler*>(codegen->GetAssembler())-> // NOLINT
-#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, x).Int32Value()
+#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kX86_64PointerSize, x).Int32Value()
class NullCheckSlowPathX86_64 : public SlowPathCode {
public:
@@ -513,7 +513,7 @@
// rX <- ReadBarrierMarkRegX(rX)
//
int32_t entry_point_offset =
- CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kX86_64WordSize>(reg);
+ CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kX86_64PointerSize>(reg);
// This runtime call does not require a stack map.
x86_64_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction_, this);
__ jmp(GetExitLabel());
@@ -883,7 +883,7 @@
// (callee_method + offset_of_quick_compiled_code)()
__ call(Address(callee_method.AsRegister<CpuRegister>(),
ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- kX86_64WordSize).SizeValue()));
+ kX86_64PointerSize).SizeValue()));
break;
}
@@ -918,7 +918,7 @@
__ movq(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
__ call(Address(temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- kX86_64WordSize).SizeValue()));
+ kX86_64PointerSize).SizeValue()));
}
void CodeGeneratorX86_64::RecordSimplePatch() {
@@ -1031,7 +1031,7 @@
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
- InvokeRuntime(GetThreadOffset<kX86_64WordSize>(entrypoint).Int32Value(),
+ InvokeRuntime(GetThreadOffset<kX86_64PointerSize>(entrypoint).Int32Value(),
instruction,
dex_pc,
slow_path);
@@ -2323,8 +2323,8 @@
// temp = temp->GetImtEntryAt(method_offset);
__ movq(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
- __ call(Address(temp,
- ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86_64WordSize).SizeValue()));
+ __ call(Address(
+ temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86_64PointerSize).SizeValue()));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -3962,7 +3962,7 @@
if (instruction->IsStringAlloc()) {
// String is allocated through StringFactory. Call NewEmptyString entry point.
CpuRegister temp = instruction->GetLocations()->GetTemp(0).AsRegister<CpuRegister>();
- MemberOffset code_offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86_64WordSize);
+ MemberOffset code_offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86_64PointerSize);
__ gs()->movq(temp, Address::Absolute(QUICK_ENTRY_POINT(pNewEmptyString), /* no_rip */ true));
__ call(Address(temp, code_offset.SizeValue()));
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
@@ -5118,7 +5118,7 @@
__ testl(value, value);
__ j(kEqual, &is_null);
}
- __ gs()->movq(card, Address::Absolute(Thread::CardTableOffset<kX86_64WordSize>().Int32Value(),
+ __ gs()->movq(card, Address::Absolute(Thread::CardTableOffset<kX86_64PointerSize>().Int32Value(),
/* no_rip */ true));
__ movq(temp, object);
__ shrq(temp, Immediate(gc::accounting::CardTable::kCardShift));
@@ -5170,7 +5170,7 @@
DCHECK_EQ(slow_path->GetSuccessor(), successor);
}
- __ gs()->cmpw(Address::Absolute(Thread::ThreadFlagsOffset<kX86_64WordSize>().Int32Value(),
+ __ gs()->cmpw(Address::Absolute(Thread::ThreadFlagsOffset<kX86_64PointerSize>().Int32Value(),
/* no_rip */ true),
Immediate(0));
if (successor == nullptr) {
@@ -5687,7 +5687,7 @@
}
static Address GetExceptionTlsAddress() {
- return Address::Absolute(Thread::ExceptionOffset<kX86_64WordSize>().Int32Value(),
+ return Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>().Int32Value(),
/* no_rip */ true);
}
@@ -6445,7 +6445,7 @@
new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathX86_64(instruction, root);
codegen_->AddSlowPath(slow_path);
- __ gs()->cmpl(Address::Absolute(Thread::IsGcMarkingOffset<kX86_64WordSize>().Int32Value(),
+ __ gs()->cmpl(Address::Absolute(Thread::IsGcMarkingOffset<kX86_64PointerSize>().Int32Value(),
/* no_rip */ true),
Immediate(0));
__ j(kNotEqual, slow_path->GetEntryLabel());
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index fbb78bc..4e0e34c 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -28,7 +28,7 @@
namespace x86_64 {
// Use a local definition to prevent copying mistakes.
-static constexpr size_t kX86_64WordSize = kX86_64PointerSize;
+static constexpr size_t kX86_64WordSize = static_cast<size_t>(kX86_64PointerSize);
// Some x86_64 instructions require a register to be available as temp.
static constexpr Register TMP = R11;
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index a592162..31cf29a 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -17,6 +17,7 @@
#include "inliner.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "builder.h"
#include "class_linker.h"
#include "constant_folding.h"
@@ -151,7 +152,7 @@
}
ClassLinker* cl = Runtime::Current()->GetClassLinker();
- size_t pointer_size = cl->GetImagePointerSize();
+ PointerSize pointer_size = cl->GetImagePointerSize();
if (invoke->IsInvokeInterface()) {
resolved_method = info.GetTypeHandle()->FindVirtualMethodForInterface(
resolved_method, pointer_size);
@@ -243,7 +244,7 @@
~ScopedProfilingInfoInlineUse() {
if (profiling_info_ != nullptr) {
- size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
DCHECK_EQ(profiling_info_, method_->GetProfilingInfo(pointer_size));
Runtime::Current()->GetJit()->GetCodeCache()->DoneCompilerUse(method_, self_);
}
@@ -390,7 +391,7 @@
}
ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker();
- size_t pointer_size = class_linker->GetImagePointerSize();
+ PointerSize pointer_size = class_linker->GetImagePointerSize();
if (invoke_instruction->IsInvokeInterface()) {
resolved_method = ic.GetMonomorphicType()->FindVirtualMethodForInterface(
resolved_method, pointer_size);
@@ -482,7 +483,7 @@
}
ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker();
- size_t pointer_size = class_linker->GetImagePointerSize();
+ PointerSize pointer_size = class_linker->GetImagePointerSize();
const DexFile& caller_dex_file = *caller_compilation_unit_.GetDexFile();
bool all_targets_inlined = true;
@@ -644,7 +645,7 @@
return false;
}
ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker();
- size_t pointer_size = class_linker->GetImagePointerSize();
+ PointerSize pointer_size = class_linker->GetImagePointerSize();
DCHECK(resolved_method != nullptr);
ArtMethod* actual_method = nullptr;
@@ -1004,7 +1005,7 @@
invoke_instruction->GetBlock()->InsertInstructionBefore(iput, invoke_instruction);
// Check whether the field is final. If it is, we need to add a barrier.
- size_t pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
+ PointerSize pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
ArtField* resolved_field = dex_cache->GetResolvedField(field_index, pointer_size);
DCHECK(resolved_field != nullptr);
if (resolved_field->IsFinal()) {
@@ -1030,7 +1031,7 @@
uint32_t field_index,
HInstruction* obj)
SHARED_REQUIRES(Locks::mutator_lock_) {
- size_t pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
+ PointerSize pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
ArtField* resolved_field = dex_cache->GetResolvedField(field_index, pointer_size);
DCHECK(resolved_field != nullptr);
HInstanceFieldGet* iget = new (graph_->GetArena()) HInstanceFieldGet(
@@ -1058,7 +1059,7 @@
HInstruction* obj,
HInstruction* value)
SHARED_REQUIRES(Locks::mutator_lock_) {
- size_t pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
+ PointerSize pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
ArtField* resolved_field = dex_cache->GetResolvedField(field_index, pointer_size);
DCHECK(resolved_field != nullptr);
HInstanceFieldSet* iput = new (graph_->GetArena()) HInstanceFieldSet(
@@ -1397,7 +1398,7 @@
}
}
- size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
// Iterate over the list of parameter types and test whether any of the
// actual inputs has a more specific reference type than the type declared in
@@ -1454,7 +1455,7 @@
// TODO: we could be more precise by merging the phi inputs but that requires
// some functionality from the reference type propagation.
DCHECK(return_replacement->IsPhi());
- size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
mirror::Class* cls = resolved_method->GetReturnType(false /* resolve */, pointer_size);
return_replacement->SetReferenceTypeInfo(GetClassRTI(cls));
}
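The repeated size_t → PointerSize swaps in inliner.cc are where the stronger type pays off: a stray byte count can no longer be handed to an API expecting a pointer size. A self-contained illustration; GetResolvedFieldSketch is hypothetical, standing in for the DexCache accessors used above:

  #include <cstddef>
  #include <cstdint>

  enum class PointerSize : size_t { k32 = 4, k64 = 8 };

  void* GetResolvedFieldSketch(uint32_t index, PointerSize ptr_size) {
    (void)index;
    (void)ptr_size;
    return nullptr;  // placeholder body; only the signature matters here
  }

  void Caller() {
    GetResolvedFieldSketch(0u, PointerSize::k64);  // OK
    // GetResolvedFieldSketch(0u, 8u);             // no longer compiles
  }
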
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 5ab9389..be061f5 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -1201,7 +1201,7 @@
}
__ LoadFromOffset(kLoadWord, LR, TR,
- QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pIndexOf).Int32Value());
+ QUICK_ENTRYPOINT_OFFSET(kArmPointerSize, pIndexOf).Int32Value());
CheckEntrypointTypes<kQuickIndexOf, int32_t, void*, uint32_t, uint32_t>();
__ blx(LR);
@@ -1270,8 +1270,10 @@
codegen_->AddSlowPath(slow_path);
__ b(slow_path->GetEntryLabel(), EQ);
- __ LoadFromOffset(
- kLoadWord, LR, TR, QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pAllocStringFromBytes).Int32Value());
+ __ LoadFromOffset(kLoadWord,
+ LR,
+ TR,
+ QUICK_ENTRYPOINT_OFFSET(kArmPointerSize, pAllocStringFromBytes).Int32Value());
CheckEntrypointTypes<kQuickAllocStringFromBytes, void*, void*, int32_t, int32_t, int32_t>();
__ blx(LR);
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1298,8 +1300,10 @@
// java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
//
// all include a null check on `data` before calling that method.
- __ LoadFromOffset(
- kLoadWord, LR, TR, QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pAllocStringFromChars).Int32Value());
+ __ LoadFromOffset(kLoadWord,
+ LR,
+ TR,
+ QUICK_ENTRYPOINT_OFFSET(kArmPointerSize, pAllocStringFromChars).Int32Value());
CheckEntrypointTypes<kQuickAllocStringFromChars, void*, int32_t, int32_t, void*>();
__ blx(LR);
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1325,7 +1329,7 @@
__ b(slow_path->GetEntryLabel(), EQ);
__ LoadFromOffset(kLoadWord,
- LR, TR, QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pAllocStringFromString).Int32Value());
+ LR, TR, QUICK_ENTRYPOINT_OFFSET(kArmPointerSize, pAllocStringFromString).Int32Value());
CheckEntrypointTypes<kQuickAllocStringFromString, void*, void*>();
__ blx(LR);
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1718,7 +1722,7 @@
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(calling_convention.GetRegisterAt(0)));
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(calling_convention.GetRegisterAt(1)));
- __ LoadFromOffset(kLoadWord, LR, TR, GetThreadOffset<kArmWordSize>(entry).Int32Value());
+ __ LoadFromOffset(kLoadWord, LR, TR, GetThreadOffset<kArmPointerSize>(entry).Int32Value());
// Native code uses the soft float ABI.
__ vmovrrd(calling_convention.GetRegisterAt(0),
calling_convention.GetRegisterAt(1),
@@ -1744,7 +1748,7 @@
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(calling_convention.GetRegisterAt(2)));
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(calling_convention.GetRegisterAt(3)));
- __ LoadFromOffset(kLoadWord, LR, TR, GetThreadOffset<kArmWordSize>(entry).Int32Value());
+ __ LoadFromOffset(kLoadWord, LR, TR, GetThreadOffset<kArmPointerSize>(entry).Int32Value());
// Native code uses the soft float ABI.
__ vmovrrd(calling_convention.GetRegisterAt(0),
calling_convention.GetRegisterAt(1),
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 987d3f8..06d1148 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -771,7 +771,7 @@
void IntrinsicCodeGeneratorARM64::VisitThreadCurrentThread(HInvoke* invoke) {
codegen_->Load(Primitive::kPrimNot, WRegisterFrom(invoke->GetLocations()->Out()),
- MemOperand(tr, Thread::PeerOffset<8>().Int32Value()));
+ MemOperand(tr, Thread::PeerOffset<kArm64PointerSize>().Int32Value()));
}
static void GenUnsafeGet(HInvoke* invoke,
@@ -1398,7 +1398,7 @@
__ Mov(tmp_reg, 0);
}
- __ Ldr(lr, MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pIndexOf).Int32Value()));
+ __ Ldr(lr, MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64PointerSize, pIndexOf).Int32Value()));
CheckEntrypointTypes<kQuickIndexOf, int32_t, void*, uint32_t, uint32_t>();
__ Blr(lr);
@@ -1468,7 +1468,8 @@
__ B(eq, slow_path->GetEntryLabel());
__ Ldr(lr,
- MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pAllocStringFromBytes).Int32Value()));
+ MemOperand(tr,
+ QUICK_ENTRYPOINT_OFFSET(kArm64PointerSize, pAllocStringFromBytes).Int32Value()));
CheckEntrypointTypes<kQuickAllocStringFromBytes, void*, void*, int32_t, int32_t, int32_t>();
__ Blr(lr);
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1496,7 +1497,8 @@
//
// all include a null check on `data` before calling that method.
__ Ldr(lr,
- MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pAllocStringFromChars).Int32Value()));
+ MemOperand(tr,
+ QUICK_ENTRYPOINT_OFFSET(kArm64PointerSize, pAllocStringFromChars).Int32Value()));
CheckEntrypointTypes<kQuickAllocStringFromChars, void*, int32_t, int32_t, void*>();
__ Blr(lr);
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1522,7 +1524,8 @@
__ B(eq, slow_path->GetEntryLabel());
__ Ldr(lr,
- MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pAllocStringFromString).Int32Value()));
+ MemOperand(tr,
+ QUICK_ENTRYPOINT_OFFSET(kArm64PointerSize, pAllocStringFromString).Int32Value()));
CheckEntrypointTypes<kQuickAllocStringFromString, void*, void*>();
__ Blr(lr);
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1563,7 +1566,8 @@
MacroAssembler* masm,
CodeGeneratorARM64* codegen,
QuickEntrypointEnum entry) {
- __ Ldr(lr, MemOperand(tr, GetThreadOffset<kArm64WordSize>(entry).Int32Value()));
+ __ Ldr(lr, MemOperand(tr,
+ GetThreadOffset<kArm64PointerSize>(entry).Int32Value()));
__ Blr(lr);
codegen->RecordPcInfo(invoke, invoke->GetDexPc());
}
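The PeerOffset change above is telling: the old Thread::PeerOffset<8>() passed a bare literal, and retyping the template parameter as PointerSize turns every such hard-coded byte count into a compile error. A sketch under the assumed definitions; PeerOffsetSketch and its constants are made up:

  #include <cstddef>

  enum class PointerSize : size_t { k32 = 4, k64 = 8 };

  template <PointerSize kPtrSize>
  constexpr size_t PeerOffsetSketch() {
    return 16u + static_cast<size_t>(kPtrSize);  // layout numbers illustrative
  }

  constexpr size_t peer64 = PeerOffsetSketch<PointerSize::k64>();  // OK
  // constexpr size_t bad = PeerOffsetSketch<8>();  // error: 8 is not a PointerSize
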
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 0bfa025..9449f79 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -1899,8 +1899,7 @@
__ LoadFromOffset(kLoadWord,
T9,
TR,
- QUICK_ENTRYPOINT_OFFSET(kMipsWordSize,
- pStringCompareTo).Int32Value());
+ QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, pStringCompareTo).Int32Value());
__ Jalr(T9);
__ Nop();
__ Bind(slow_path->GetExitLabel());
@@ -2059,7 +2058,7 @@
__ LoadFromOffset(kLoadWord,
T9,
TR,
- QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, pIndexOf).Int32Value());
+ QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, pIndexOf).Int32Value());
__ Jalr(T9);
__ Nop();
@@ -2145,7 +2144,7 @@
__ LoadFromOffset(kLoadWord,
T9,
TR,
- QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, pAllocStringFromBytes).Int32Value());
+ QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, pAllocStringFromBytes).Int32Value());
__ Jalr(T9);
__ Nop();
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -2178,7 +2177,7 @@
__ LoadFromOffset(kLoadWord,
T9,
TR,
- QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, pAllocStringFromChars).Int32Value());
+ QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, pAllocStringFromChars).Int32Value());
__ Jalr(T9);
__ Nop();
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -2207,7 +2206,7 @@
__ LoadFromOffset(kLoadWord,
T9,
TR,
- QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, pAllocStringFromString).Int32Value());
+ QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, pAllocStringFromString).Int32Value());
__ Jalr(T9);
__ Nop();
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index dfaa84e..8d4d3e5 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -1543,7 +1543,7 @@
__ LoadFromOffset(kLoadDoubleword,
T9,
TR,
- QUICK_ENTRYPOINT_OFFSET(kMips64DoublewordSize, pStringCompareTo).Int32Value());
+ QUICK_ENTRYPOINT_OFFSET(kMips64PointerSize, pStringCompareTo).Int32Value());
__ Jalr(T9);
__ Nop();
__ Bind(slow_path->GetExitLabel());
@@ -1694,7 +1694,7 @@
__ LoadFromOffset(kLoadDoubleword,
T9,
TR,
- QUICK_ENTRYPOINT_OFFSET(kMips64DoublewordSize, pIndexOf).Int32Value());
+ QUICK_ENTRYPOINT_OFFSET(kMips64PointerSize, pIndexOf).Int32Value());
CheckEntrypointTypes<kQuickIndexOf, int32_t, void*, uint32_t, uint32_t>();
__ Jalr(T9);
__ Nop();
@@ -1771,7 +1771,7 @@
__ LoadFromOffset(kLoadDoubleword,
T9,
TR,
- QUICK_ENTRYPOINT_OFFSET(kMips64DoublewordSize,
+ QUICK_ENTRYPOINT_OFFSET(kMips64PointerSize,
pAllocStringFromBytes).Int32Value());
CheckEntrypointTypes<kQuickAllocStringFromBytes, void*, void*, int32_t, int32_t, int32_t>();
__ Jalr(T9);
@@ -1805,7 +1805,7 @@
__ LoadFromOffset(kLoadDoubleword,
T9,
TR,
- QUICK_ENTRYPOINT_OFFSET(kMips64DoublewordSize,
+ QUICK_ENTRYPOINT_OFFSET(kMips64PointerSize,
pAllocStringFromChars).Int32Value());
CheckEntrypointTypes<kQuickAllocStringFromChars, void*, int32_t, int32_t, void*>();
__ Jalr(T9);
@@ -1836,7 +1836,7 @@
__ LoadFromOffset(kLoadDoubleword,
T9,
TR,
- QUICK_ENTRYPOINT_OFFSET(kMips64DoublewordSize,
+ QUICK_ENTRYPOINT_OFFSET(kMips64PointerSize,
pAllocStringFromString).Int32Value());
CheckEntrypointTypes<kQuickAllocStringFromString, void*, void*>();
__ Jalr(T9);
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 6c81421..65f4def 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -857,7 +857,7 @@
}
// Now do the actual call.
- __ fs()->call(Address::Absolute(GetThreadOffset<kX86WordSize>(entry)));
+ __ fs()->call(Address::Absolute(GetThreadOffset<kX86PointerSize>(entry)));
// Extract the return value from the FP stack.
__ fstpl(Address(ESP, 0));
@@ -1237,7 +1237,7 @@
codegen_->AddSlowPath(slow_path);
__ j(kEqual, slow_path->GetEntryLabel());
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pStringCompareTo)));
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86PointerSize, pStringCompareTo)));
__ Bind(slow_path->GetExitLabel());
}
@@ -1510,7 +1510,7 @@
codegen_->AddSlowPath(slow_path);
__ j(kEqual, slow_path->GetEntryLabel());
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAllocStringFromBytes)));
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86PointerSize, pAllocStringFromBytes)));
CheckEntrypointTypes<kQuickAllocStringFromBytes, void*, void*, int32_t, int32_t, int32_t>();
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
__ Bind(slow_path->GetExitLabel());
@@ -1536,7 +1536,7 @@
// java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
//
// all include a null check on `data` before calling that method.
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAllocStringFromChars)));
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86PointerSize, pAllocStringFromChars)));
CheckEntrypointTypes<kQuickAllocStringFromChars, void*, int32_t, int32_t, void*>();
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
@@ -1560,7 +1560,8 @@
codegen_->AddSlowPath(slow_path);
__ j(kEqual, slow_path->GetEntryLabel());
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAllocStringFromString)));
+ __ fs()->call(
+ Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86PointerSize, pAllocStringFromString)));
CheckEntrypointTypes<kQuickAllocStringFromString, void*, void*>();
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
__ Bind(slow_path->GetExitLabel());
@@ -1801,7 +1802,7 @@
void IntrinsicCodeGeneratorX86::VisitThreadCurrentThread(HInvoke* invoke) {
Register out = invoke->GetLocations()->Out().AsRegister<Register>();
- GetAssembler()->fs()->movl(out, Address::Absolute(Thread::PeerOffset<kX86WordSize>()));
+ GetAssembler()->fs()->movl(out, Address::Absolute(Thread::PeerOffset<kX86PointerSize>()));
}
static void GenUnsafeGet(HInvoke* invoke,
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 28f1f4f..7e0d729 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -720,7 +720,7 @@
DCHECK(invoke->IsInvokeStaticOrDirect());
X86_64Assembler* assembler = codegen->GetAssembler();
- __ gs()->call(Address::Absolute(GetThreadOffset<kX86_64WordSize>(entry), true));
+ __ gs()->call(Address::Absolute(GetThreadOffset<kX86_64PointerSize>(entry), true));
codegen->RecordPcInfo(invoke, invoke->GetDexPc());
}
@@ -1324,7 +1324,7 @@
codegen_->AddSlowPath(slow_path);
__ j(kEqual, slow_path->GetEntryLabel());
- __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pStringCompareTo),
+ __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64PointerSize, pStringCompareTo),
/* no_rip */ true));
__ Bind(slow_path->GetExitLabel());
}
@@ -1597,7 +1597,8 @@
codegen_->AddSlowPath(slow_path);
__ j(kEqual, slow_path->GetEntryLabel());
- __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pAllocStringFromBytes),
+ __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64PointerSize,
+ pAllocStringFromBytes),
/* no_rip */ true));
CheckEntrypointTypes<kQuickAllocStringFromBytes, void*, void*, int32_t, int32_t, int32_t>();
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1624,7 +1625,8 @@
// java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
//
// all include a null check on `data` before calling that method.
- __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pAllocStringFromChars),
+ __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64PointerSize,
+ pAllocStringFromChars),
/* no_rip */ true));
CheckEntrypointTypes<kQuickAllocStringFromChars, void*, int32_t, int32_t, void*>();
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1649,7 +1651,8 @@
codegen_->AddSlowPath(slow_path);
__ j(kEqual, slow_path->GetEntryLabel());
- __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pAllocStringFromString),
+ __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64PointerSize,
+ pAllocStringFromString),
/* no_rip */ true));
CheckEntrypointTypes<kQuickAllocStringFromString, void*, void*>();
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1875,7 +1878,7 @@
void IntrinsicCodeGeneratorX86_64::VisitThreadCurrentThread(HInvoke* invoke) {
CpuRegister out = invoke->GetLocations()->Out().AsRegister<CpuRegister>();
- GetAssembler()->gs()->movl(out, Address::Absolute(Thread::PeerOffset<kX86_64WordSize>(),
+ GetAssembler()->gs()->movl(out, Address::Absolute(Thread::PeerOffset<kX86_64PointerSize>(),
/* no_rip */ true));
}
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 965d5ee..e96ab19 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -16,6 +16,7 @@
#include "reference_type_propagation.h"
+#include "base/enums.h"
#include "class_linker-inl.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache.h"
@@ -775,7 +776,7 @@
ClassLinker* cl = Runtime::Current()->GetClassLinker();
mirror::DexCache* dex_cache =
FindDexCacheWithHint(soa.Self(), instr->GetDexFile(), hint_dex_cache_);
- size_t pointer_size = cl->GetImagePointerSize();
+ PointerSize pointer_size = cl->GetImagePointerSize();
ArtMethod* method = dex_cache->GetResolvedMethod(instr->GetDexMethodIndex(), pointer_size);
mirror::Class* klass = (method == nullptr) ? nullptr : method->GetReturnType(false, pointer_size);
SetClassAsTypeInfo(instr, klass, /* is_exact */ false);
diff --git a/compiler/optimizing/register_allocator_linear_scan.cc b/compiler/optimizing/register_allocator_linear_scan.cc
index a9151ba..768ed2d 100644
--- a/compiler/optimizing/register_allocator_linear_scan.cc
+++ b/compiler/optimizing/register_allocator_linear_scan.cc
@@ -20,6 +20,7 @@
#include <sstream>
#include "base/bit_vector-inl.h"
+#include "base/enums.h"
#include "code_generator.h"
#include "register_allocation_resolver.h"
#include "ssa_liveness_analysis.h"
@@ -77,8 +78,8 @@
// Always reserve for the current method and the graph's max out registers.
// TODO: compute it instead.
// ArtMethod* takes 2 vregs for 64 bits.
- reserved_out_slots_ = InstructionSetPointerSize(codegen->GetInstructionSet()) / kVRegSize +
- codegen->GetGraph()->GetMaximumNumberOfOutVRegs();
+ size_t ptr_size = static_cast<size_t>(InstructionSetPointerSize(codegen->GetInstructionSet()));
+ reserved_out_slots_ = ptr_size / kVRegSize + codegen->GetGraph()->GetMaximumNumberOfOutVRegs();
}
static bool ShouldProcess(bool processing_core_registers, LiveInterval* interval) {
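The reserved_out_slots_ computation is one of the few places where the enum must decay back to bytes, hence the local ptr_size cast. Worked through, with kVRegSize being ART's 4-byte dex register slot:

  #include <cstddef>

  constexpr size_t kVRegSize = 4;
  constexpr size_t ptr32 = 4;  // static_cast<size_t>(PointerSize::k32)
  constexpr size_t ptr64 = 8;  // static_cast<size_t>(PointerSize::k64)

  static_assert(ptr32 / kVRegSize == 1, "ArtMethod* takes 1 vreg on 32-bit");
  static_assert(ptr64 / kVRegSize == 2, "and 2 vregs on 64-bit");
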
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index 97f34e6..b73f738 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -17,6 +17,7 @@
#include "sharpening.h"
#include "base/casts.h"
+#include "base/enums.h"
#include "class_linker.h"
#include "code_generator.h"
#include "driver/dex_compilation_unit.h"
@@ -259,7 +260,7 @@
load_class->SetLoadKindWithAddress(load_kind, address);
break;
case HLoadClass::LoadKind::kDexCachePcRelative: {
- size_t pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
+ PointerSize pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
DexCacheArraysLayout layout(pointer_size, &dex_file);
size_t element_index = layout.TypeOffset(type_index);
load_class->SetLoadKindWithDexCacheReference(load_kind, dex_file, element_index);
@@ -358,7 +359,7 @@
load_string->SetLoadKindWithAddress(load_kind, address);
break;
case HLoadString::LoadKind::kDexCachePcRelative: {
- size_t pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
+ PointerSize pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
DexCacheArraysLayout layout(pointer_size, &dex_file);
size_t element_index = layout.StringOffset(string_index);
load_string->SetLoadKindWithDexCacheReference(load_kind, dex_file, element_index);
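DexCacheArraysLayout taking the enum directly fits its job: the dex cache arrays hold pointers, so every offset it returns is a multiple of the pointer size. A toy TypeOffset under that assumption; the real layout also accounts for the arrays that precede the type array:

  #include <cstddef>
  #include <cstdint>

  enum class PointerSize : size_t { k32 = 4, k64 = 8 };

  class DexCacheArraysLayoutSketch {
   public:
    explicit DexCacheArraysLayoutSketch(PointerSize ps)
        : pointer_size_(static_cast<size_t>(ps)) {}
    size_t TypeOffset(uint32_t type_index) const {
      return type_index * pointer_size_;  // simplified; ignores preceding sections
    }
   private:
    const size_t pointer_size_;
  };
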
diff --git a/compiler/trampolines/trampoline_compiler.cc b/compiler/trampolines/trampoline_compiler.cc
index 1ee1c4d..304e56b 100644
--- a/compiler/trampolines/trampoline_compiler.cc
+++ b/compiler/trampolines/trampoline_compiler.cc
@@ -50,7 +50,7 @@
#ifdef ART_ENABLE_CODEGEN_arm
namespace arm {
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
- ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset<4> offset) {
+ ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset32 offset) {
Thumb2Assembler assembler(arena);
switch (abi) {
@@ -80,7 +80,7 @@
#ifdef ART_ENABLE_CODEGEN_arm64
namespace arm64 {
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
- ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset<8> offset) {
+ ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset64 offset) {
Arm64Assembler assembler(arena);
switch (abi) {
@@ -119,7 +119,7 @@
#ifdef ART_ENABLE_CODEGEN_mips
namespace mips {
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
- ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset<4> offset) {
+ ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset32 offset) {
MipsAssembler assembler(arena);
switch (abi) {
@@ -151,7 +151,7 @@
#ifdef ART_ENABLE_CODEGEN_mips64
namespace mips64 {
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
- ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset<8> offset) {
+ ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset64 offset) {
Mips64Assembler assembler(arena);
switch (abi) {
@@ -183,7 +183,7 @@
#ifdef ART_ENABLE_CODEGEN_x86
namespace x86 {
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(ArenaAllocator* arena,
- ThreadOffset<4> offset) {
+ ThreadOffset32 offset) {
X86Assembler assembler(arena);
// All x86 trampolines call via the Thread* held in fs.
@@ -204,7 +204,7 @@
#ifdef ART_ENABLE_CODEGEN_x86_64
namespace x86_64 {
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(ArenaAllocator* arena,
- ThreadOffset<8> offset) {
+ ThreadOffset64 offset) {
x86_64::X86_64Assembler assembler(arena);
// All x86 trampolines call via the Thread* held in gs.
@@ -224,7 +224,7 @@
std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline64(InstructionSet isa,
EntryPointCallingConvention abi,
- ThreadOffset<8> offset) {
+ ThreadOffset64 offset) {
ArenaPool pool;
ArenaAllocator arena(&pool);
switch (isa) {
@@ -250,7 +250,7 @@
std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline32(InstructionSet isa,
EntryPointCallingConvention abi,
- ThreadOffset<4> offset) {
+ ThreadOffset32 offset) {
ArenaPool pool;
ArenaAllocator arena(&pool);
switch (isa) {
diff --git a/compiler/trampolines/trampoline_compiler.h b/compiler/trampolines/trampoline_compiler.h
index 8f823f1..1a10e4c 100644
--- a/compiler/trampolines/trampoline_compiler.h
+++ b/compiler/trampolines/trampoline_compiler.h
@@ -27,10 +27,10 @@
// Create code that will invoke the function held in thread local storage.
std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline32(InstructionSet isa,
EntryPointCallingConvention abi,
- ThreadOffset<4> entry_point_offset);
+ ThreadOffset32 entry_point_offset);
std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline64(InstructionSet isa,
EntryPointCallingConvention abi,
- ThreadOffset<8> entry_point_offset);
+ ThreadOffset64 entry_point_offset);
} // namespace art
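The ThreadOffset<4>/ThreadOffset<8> rewrites here and in the .cc suggest ThreadOffset32/ThreadOffset64 are aliases defined next to PointerSize, keyed on the enum rather than a raw byte count. Presumably along these lines (a sketch, not the verified definitions):

  #include <cstddef>

  enum class PointerSize : size_t { k32 = 4, k64 = 8 };

  template <PointerSize kPointerSize>
  class ThreadOffset;  // the runtime's existing template, re-keyed on the enum

  using ThreadOffset32 = ThreadOffset<PointerSize::k32>;
  using ThreadOffset64 = ThreadOffset<PointerSize::k64>;
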
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
index a7f4547..1796b39 100644
--- a/compiler/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -384,7 +384,7 @@
return dwarf::Reg::ArmFp(static_cast<int>(reg));
}
-constexpr size_t kFramePointerSize = kArmPointerSize;
+constexpr size_t kFramePointerSize = static_cast<size_t>(kArmPointerSize);
void ArmAssembler::BuildFrame(size_t frame_size,
ManagedRegister method_reg,
@@ -568,8 +568,9 @@
StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
}
-void ArmAssembler::StoreImmediateToThread32(ThreadOffset<4> dest, uint32_t imm,
- ManagedRegister mscratch) {
+void ArmAssembler::StoreImmediateToThread32(ThreadOffset32 dest,
+ uint32_t imm,
+ ManagedRegister mscratch) {
ArmManagedRegister scratch = mscratch.AsArm();
CHECK(scratch.IsCoreRegister()) << scratch;
LoadImmediate(scratch.AsCoreRegister(), imm);
@@ -600,19 +601,19 @@
return EmitLoad(this, m_dst, SP, src.Int32Value(), size);
}
-void ArmAssembler::LoadFromThread32(ManagedRegister m_dst, ThreadOffset<4> src, size_t size) {
+void ArmAssembler::LoadFromThread32(ManagedRegister m_dst, ThreadOffset32 src, size_t size) {
return EmitLoad(this, m_dst, TR, src.Int32Value(), size);
}
-void ArmAssembler::LoadRawPtrFromThread32(ManagedRegister m_dst, ThreadOffset<4> offs) {
+void ArmAssembler::LoadRawPtrFromThread32(ManagedRegister m_dst, ThreadOffset32 offs) {
ArmManagedRegister dst = m_dst.AsArm();
CHECK(dst.IsCoreRegister()) << dst;
LoadFromOffset(kLoadWord, dst.AsCoreRegister(), TR, offs.Int32Value());
}
void ArmAssembler::CopyRawPtrFromThread32(FrameOffset fr_offs,
- ThreadOffset<4> thr_offs,
- ManagedRegister mscratch) {
+ ThreadOffset32 thr_offs,
+ ManagedRegister mscratch) {
ArmManagedRegister scratch = mscratch.AsArm();
CHECK(scratch.IsCoreRegister()) << scratch;
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
@@ -621,9 +622,9 @@
SP, fr_offs.Int32Value());
}
-void ArmAssembler::CopyRawPtrToThread32(ThreadOffset<4> thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) {
+void ArmAssembler::CopyRawPtrToThread32(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
ArmManagedRegister scratch = mscratch.AsArm();
CHECK(scratch.IsCoreRegister()) << scratch;
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
@@ -632,9 +633,9 @@
TR, thr_offs.Int32Value());
}
-void ArmAssembler::StoreStackOffsetToThread32(ThreadOffset<4> thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) {
+void ArmAssembler::StoreStackOffsetToThread32(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
ArmManagedRegister scratch = mscratch.AsArm();
CHECK(scratch.IsCoreRegister()) << scratch;
AddConstant(scratch.AsCoreRegister(), SP, fr_offs.Int32Value(), AL);
@@ -642,7 +643,7 @@
TR, thr_offs.Int32Value());
}
-void ArmAssembler::StoreStackPointerToThread32(ThreadOffset<4> thr_offs) {
+void ArmAssembler::StoreStackPointerToThread32(ThreadOffset32 thr_offs) {
StoreToOffset(kStoreWord, SP, TR, thr_offs.Int32Value());
}
@@ -831,7 +832,8 @@
// TODO: place reference map on call
}
-void ArmAssembler::CallFromThread32(ThreadOffset<4> /*offset*/, ManagedRegister /*scratch*/) {
+void ArmAssembler::CallFromThread32(ThreadOffset32 offset ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
@@ -848,8 +850,10 @@
ArmManagedRegister scratch = mscratch.AsArm();
ArmExceptionSlowPath* slow = new (GetArena()) ArmExceptionSlowPath(scratch, stack_adjust);
buffer_.EnqueueSlowPath(slow);
- LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
- TR, Thread::ExceptionOffset<4>().Int32Value());
+ LoadFromOffset(kLoadWord,
+ scratch.AsCoreRegister(),
+ TR,
+ Thread::ExceptionOffset<kArmPointerSize>().Int32Value());
cmp(scratch.AsCoreRegister(), ShifterOperand(0));
b(slow->Entry(), NE);
}
@@ -865,7 +869,10 @@
// Don't care about preserving R0 as this call won't return.
__ mov(R0, ShifterOperand(scratch_.AsCoreRegister()));
// Set up call to Thread::Current()->pDeliverException.
- __ LoadFromOffset(kLoadWord, R12, TR, QUICK_ENTRYPOINT_OFFSET(4, pDeliverException).Int32Value());
+ __ LoadFromOffset(kLoadWord,
+ R12,
+ TR,
+ QUICK_ENTRYPOINT_OFFSET(kArmPointerSize, pDeliverException).Int32Value());
__ blx(R12);
#undef __
}
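A small style shift rides along in CallFromThread32 above: commented-out parameter names become named parameters tagged ATTRIBUTE_UNUSED. Both forms silence -Wunused-parameter; the named form keeps signatures greppable. A reduced, compilable comparison, assuming ATTRIBUTE_UNUSED expands to the GCC/Clang attribute as in ART's base/macros.h:

  #define ATTRIBUTE_UNUSED __attribute__((__unused__))

  // Old spelling: the name is elided in a comment.
  void CallSketchOld(int /*offset*/) {}

  // New spelling: the name stays, the attribute suppresses the warning.
  void CallSketchNew(int offset ATTRIBUTE_UNUSED) {}
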
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index 9cf72a2..2b7414d 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -904,13 +904,13 @@
void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
- void StoreImmediateToThread32(ThreadOffset<4> dest, uint32_t imm, ManagedRegister scratch)
+ void StoreImmediateToThread32(ThreadOffset32 dest, uint32_t imm, ManagedRegister scratch)
OVERRIDE;
- void StoreStackOffsetToThread32(ThreadOffset<4> thr_offs, FrameOffset fr_offs,
+ void StoreStackOffsetToThread32(ThreadOffset32 thr_offs, FrameOffset fr_offs,
ManagedRegister scratch) OVERRIDE;
- void StoreStackPointerToThread32(ThreadOffset<4> thr_offs) OVERRIDE;
+ void StoreStackPointerToThread32(ThreadOffset32 thr_offs) OVERRIDE;
void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
ManagedRegister scratch) OVERRIDE;
@@ -918,7 +918,7 @@
// Load routines
void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
- void LoadFromThread32(ManagedRegister dest, ThreadOffset<4> src, size_t size) OVERRIDE;
+ void LoadFromThread32(ManagedRegister dest, ThreadOffset32 src, size_t size) OVERRIDE;
void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
@@ -927,15 +927,15 @@
void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
- void LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset<4> offs) OVERRIDE;
+ void LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset32 offs) OVERRIDE;
// Copying routines
void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
- void CopyRawPtrFromThread32(FrameOffset fr_offs, ThreadOffset<4> thr_offs,
+ void CopyRawPtrFromThread32(FrameOffset fr_offs, ThreadOffset32 thr_offs,
ManagedRegister scratch) OVERRIDE;
- void CopyRawPtrToThread32(ThreadOffset<4> thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
+ void CopyRawPtrToThread32(ThreadOffset32 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
OVERRIDE;
void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
@@ -990,7 +990,7 @@
// Call to address held at [base+offset]
void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void CallFromThread32(ThreadOffset<4> offset, ManagedRegister scratch) OVERRIDE;
+ void CallFromThread32(ThreadOffset32 offset, ManagedRegister scratch) OVERRIDE;
// Generate code to check if Thread::Current()->exception_ is non-null
// and branch to a ExceptionSlowPath if it is.
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index 9f2027f..d82caf5 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -164,24 +164,25 @@
offs.Int32Value());
}
-void Arm64Assembler::StoreImmediateToThread64(ThreadOffset<8> offs, uint32_t imm,
- ManagedRegister m_scratch) {
+void Arm64Assembler::StoreImmediateToThread64(ThreadOffset64 offs,
+ uint32_t imm,
+ ManagedRegister m_scratch) {
Arm64ManagedRegister scratch = m_scratch.AsArm64();
CHECK(scratch.IsXRegister()) << scratch;
LoadImmediate(scratch.AsXRegister(), imm);
StoreToOffset(scratch.AsXRegister(), TR, offs.Int32Value());
}
-void Arm64Assembler::StoreStackOffsetToThread64(ThreadOffset<8> tr_offs,
- FrameOffset fr_offs,
- ManagedRegister m_scratch) {
+void Arm64Assembler::StoreStackOffsetToThread64(ThreadOffset64 tr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister m_scratch) {
Arm64ManagedRegister scratch = m_scratch.AsArm64();
CHECK(scratch.IsXRegister()) << scratch;
AddConstant(scratch.AsXRegister(), SP, fr_offs.Int32Value());
StoreToOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
}
-void Arm64Assembler::StoreStackPointerToThread64(ThreadOffset<8> tr_offs) {
+void Arm64Assembler::StoreStackPointerToThread64(ThreadOffset64 tr_offs) {
UseScratchRegisterScope temps(vixl_masm_);
Register temp = temps.AcquireX();
___ Mov(temp, reg_x(SP));
@@ -285,7 +286,7 @@
return Load(m_dst.AsArm64(), SP, src.Int32Value(), size);
}
-void Arm64Assembler::LoadFromThread64(ManagedRegister m_dst, ThreadOffset<8> src, size_t size) {
+void Arm64Assembler::LoadFromThread64(ManagedRegister m_dst, ThreadOffset64 src, size_t size) {
return Load(m_dst.AsArm64(), TR, src.Int32Value(), size);
}
@@ -318,7 +319,7 @@
___ Ldr(reg_x(dst.AsXRegister()), MEM_OP(reg_x(base.AsXRegister()), offs.Int32Value()));
}
-void Arm64Assembler::LoadRawPtrFromThread64(ManagedRegister m_dst, ThreadOffset<8> offs) {
+void Arm64Assembler::LoadRawPtrFromThread64(ManagedRegister m_dst, ThreadOffset64 offs) {
Arm64ManagedRegister dst = m_dst.AsArm64();
CHECK(dst.IsXRegister()) << dst;
LoadFromOffset(dst.AsXRegister(), TR, offs.Int32Value());
@@ -355,17 +356,17 @@
}
void Arm64Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs,
- ThreadOffset<8> tr_offs,
- ManagedRegister m_scratch) {
+ ThreadOffset64 tr_offs,
+ ManagedRegister m_scratch) {
Arm64ManagedRegister scratch = m_scratch.AsArm64();
CHECK(scratch.IsXRegister()) << scratch;
LoadFromOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
StoreToOffset(scratch.AsXRegister(), SP, fr_offs.Int32Value());
}
-void Arm64Assembler::CopyRawPtrToThread64(ThreadOffset<8> tr_offs,
- FrameOffset fr_offs,
- ManagedRegister m_scratch) {
+void Arm64Assembler::CopyRawPtrToThread64(ThreadOffset64 tr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister m_scratch) {
Arm64ManagedRegister scratch = m_scratch.AsArm64();
CHECK(scratch.IsXRegister()) << scratch;
LoadFromOffset(scratch.AsXRegister(), SP, fr_offs.Int32Value());
@@ -542,7 +543,8 @@
___ Blr(reg_x(scratch.AsXRegister()));
}
-void Arm64Assembler::CallFromThread64(ThreadOffset<8> /*offset*/, ManagedRegister /*scratch*/) {
+void Arm64Assembler::CallFromThread64(ThreadOffset64 offset ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL) << "Unimplemented Call() variant";
}
@@ -612,7 +614,9 @@
CHECK_ALIGNED(stack_adjust, kStackAlignment);
Arm64ManagedRegister scratch = m_scratch.AsArm64();
exception_blocks_.emplace_back(new Arm64Exception(scratch, stack_adjust));
- LoadFromOffset(scratch.AsXRegister(), TR, Thread::ExceptionOffset<8>().Int32Value());
+ LoadFromOffset(scratch.AsXRegister(),
+ TR,
+ Thread::ExceptionOffset<kArm64PointerSize>().Int32Value());
___ Cbnz(reg_x(scratch.AsXRegister()), exception_blocks_.back()->Entry());
}
@@ -629,7 +633,9 @@
// Pass exception object as argument.
// Don't care about preserving X0 as this won't return.
___ Mov(reg_x(X0), reg_x(exception->scratch_.AsXRegister()));
- ___ Ldr(temp, MEM_OP(reg_x(TR), QUICK_ENTRYPOINT_OFFSET(8, pDeliverException).Int32Value()));
+ ___ Ldr(temp,
+ MEM_OP(reg_x(TR),
+ QUICK_ENTRYPOINT_OFFSET(kArm64PointerSize, pDeliverException).Int32Value()));
___ Blr(temp);
// Call should never return.
@@ -720,7 +726,7 @@
// Increase frame to required size.
DCHECK_ALIGNED(frame_size, kStackAlignment);
- DCHECK_GE(frame_size, core_reg_size + fp_reg_size + kArm64PointerSize);
+ DCHECK_GE(frame_size, core_reg_size + fp_reg_size + static_cast<size_t>(kArm64PointerSize));
IncreaseFrameSize(frame_size);
// Save callee-saves.
@@ -734,7 +740,7 @@
StoreToOffset(X0, SP, 0);
// Write out entry spills
- int32_t offset = frame_size + kArm64PointerSize;
+ int32_t offset = frame_size + static_cast<size_t>(kArm64PointerSize);
for (size_t i = 0; i < entry_spills.size(); ++i) {
Arm64ManagedRegister reg = entry_spills.at(i).AsArm64();
if (reg.IsNoRegister()) {
@@ -776,7 +782,7 @@
// For now we only check that the size of the frame is large enough to hold spills and method
// reference.
- DCHECK_GE(frame_size, core_reg_size + fp_reg_size + kArm64PointerSize);
+ DCHECK_GE(frame_size, core_reg_size + fp_reg_size + static_cast<size_t>(kArm64PointerSize));
DCHECK_ALIGNED(frame_size, kStackAlignment);
DCHECK(core_reg_list.IncludesAliasOf(reg_x(TR)));
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index a481544..24b7982 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -126,28 +126,28 @@
void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
- void StoreImmediateToThread64(ThreadOffset<8> dest, uint32_t imm, ManagedRegister scratch)
+ void StoreImmediateToThread64(ThreadOffset64 dest, uint32_t imm, ManagedRegister scratch)
OVERRIDE;
- void StoreStackOffsetToThread64(ThreadOffset<8> thr_offs, FrameOffset fr_offs,
+ void StoreStackOffsetToThread64(ThreadOffset64 thr_offs, FrameOffset fr_offs,
ManagedRegister scratch) OVERRIDE;
- void StoreStackPointerToThread64(ThreadOffset<8> thr_offs) OVERRIDE;
+ void StoreStackPointerToThread64(ThreadOffset64 thr_offs) OVERRIDE;
void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
ManagedRegister scratch) OVERRIDE;
// Load routines.
void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
- void LoadFromThread64(ManagedRegister dest, ThreadOffset<8> src, size_t size) OVERRIDE;
+ void LoadFromThread64(ManagedRegister dest, ThreadOffset64 src, size_t size) OVERRIDE;
void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
bool unpoison_reference) OVERRIDE;
void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
- void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset<8> offs) OVERRIDE;
+ void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset64 offs) OVERRIDE;
// Copying routines.
void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
- void CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset<8> thr_offs,
+ void CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset64 thr_offs,
ManagedRegister scratch) OVERRIDE;
- void CopyRawPtrToThread64(ThreadOffset<8> thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
+ void CopyRawPtrToThread64(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
OVERRIDE;
void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
@@ -200,7 +200,7 @@
// Call to address held at [base+offset].
void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void CallFromThread64(ThreadOffset<8> offset, ManagedRegister scratch) OVERRIDE;
+ void CallFromThread64(ThreadOffset64 offset, ManagedRegister scratch) OVERRIDE;
// Jump to address (not setting link register)
void JumpTo(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch);
diff --git a/compiler/utils/assembler.cc b/compiler/utils/assembler.cc
index e6c3a18..0a1b733 100644
--- a/compiler/utils/assembler.cc
+++ b/compiler/utils/assembler.cc
@@ -162,90 +162,94 @@
}
}
-void Assembler::StoreImmediateToThread32(ThreadOffset<4> dest ATTRIBUTE_UNUSED,
+void Assembler::StoreImmediateToThread32(ThreadOffset32 dest ATTRIBUTE_UNUSED,
uint32_t imm ATTRIBUTE_UNUSED,
ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::StoreImmediateToThread64(ThreadOffset<8> dest ATTRIBUTE_UNUSED,
+void Assembler::StoreImmediateToThread64(ThreadOffset64 dest ATTRIBUTE_UNUSED,
uint32_t imm ATTRIBUTE_UNUSED,
ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::StoreStackOffsetToThread32(ThreadOffset<4> thr_offs ATTRIBUTE_UNUSED,
- FrameOffset fr_offs ATTRIBUTE_UNUSED,
- ManagedRegister scratch ATTRIBUTE_UNUSED) {
+void Assembler::StoreStackOffsetToThread32(
+ ThreadOffset32 thr_offs ATTRIBUTE_UNUSED,
+ FrameOffset fr_offs ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::StoreStackOffsetToThread64(ThreadOffset<8> thr_offs ATTRIBUTE_UNUSED,
- FrameOffset fr_offs ATTRIBUTE_UNUSED,
- ManagedRegister scratch ATTRIBUTE_UNUSED) {
+void Assembler::StoreStackOffsetToThread64(
+ ThreadOffset64 thr_offs ATTRIBUTE_UNUSED,
+ FrameOffset fr_offs ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::StoreStackPointerToThread32(ThreadOffset<4> thr_offs ATTRIBUTE_UNUSED) {
+void Assembler::StoreStackPointerToThread32(
+ ThreadOffset32 thr_offs ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::StoreStackPointerToThread64(ThreadOffset<8> thr_offs ATTRIBUTE_UNUSED) {
+void Assembler::StoreStackPointerToThread64(
+ ThreadOffset64 thr_offs ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
void Assembler::LoadFromThread32(ManagedRegister dest ATTRIBUTE_UNUSED,
- ThreadOffset<4> src ATTRIBUTE_UNUSED,
+ ThreadOffset32 src ATTRIBUTE_UNUSED,
size_t size ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
void Assembler::LoadFromThread64(ManagedRegister dest ATTRIBUTE_UNUSED,
- ThreadOffset<8> src ATTRIBUTE_UNUSED,
+ ThreadOffset64 src ATTRIBUTE_UNUSED,
size_t size ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
void Assembler::LoadRawPtrFromThread32(ManagedRegister dest ATTRIBUTE_UNUSED,
- ThreadOffset<4> offs ATTRIBUTE_UNUSED) {
+ ThreadOffset32 offs ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
void Assembler::LoadRawPtrFromThread64(ManagedRegister dest ATTRIBUTE_UNUSED,
- ThreadOffset<8> offs ATTRIBUTE_UNUSED) {
+ ThreadOffset64 offs ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
void Assembler::CopyRawPtrFromThread32(FrameOffset fr_offs ATTRIBUTE_UNUSED,
- ThreadOffset<4> thr_offs ATTRIBUTE_UNUSED,
+ ThreadOffset32 thr_offs ATTRIBUTE_UNUSED,
ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
void Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs ATTRIBUTE_UNUSED,
- ThreadOffset<8> thr_offs ATTRIBUTE_UNUSED,
+ ThreadOffset64 thr_offs ATTRIBUTE_UNUSED,
ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::CopyRawPtrToThread32(ThreadOffset<4> thr_offs ATTRIBUTE_UNUSED,
+void Assembler::CopyRawPtrToThread32(ThreadOffset32 thr_offs ATTRIBUTE_UNUSED,
FrameOffset fr_offs ATTRIBUTE_UNUSED,
ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::CopyRawPtrToThread64(ThreadOffset<8> thr_offs ATTRIBUTE_UNUSED,
+void Assembler::CopyRawPtrToThread64(ThreadOffset64 thr_offs ATTRIBUTE_UNUSED,
FrameOffset fr_offs ATTRIBUTE_UNUSED,
ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::CallFromThread32(ThreadOffset<4> offset ATTRIBUTE_UNUSED,
+void Assembler::CallFromThread32(ThreadOffset32 offset ATTRIBUTE_UNUSED,
ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::CallFromThread64(ThreadOffset<8> offset ATTRIBUTE_UNUSED,
+void Assembler::CallFromThread64(ThreadOffset64 offset ATTRIBUTE_UNUSED,
ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
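
These stubs preserve the existing convention: the base Assembler supplies crashing defaults for the width-specific thread routines, so each backend overrides only the width it targets and any stray call fails fast at runtime. A reduced sketch of the pattern, with a stand-in for ART's UNIMPLEMENTED(FATAL) macro (the abort-based body is an assumption):

    #include <cstdlib>
    #include <iostream>

    // Stand-in for ART's UNIMPLEMENTED(FATAL): log the call site and abort.
    #define UNIMPLEMENTED_FATAL() \
      do { std::cerr << __func__ << ": unimplemented\n"; std::abort(); } while (0)

    class Assembler {
     public:
      virtual ~Assembler() {}
      // Crashing defaults: a 32-bit backend never needs the 64-bit entry
      // point, so reaching the default at runtime signals a compiler bug.
      virtual void CallFromThread32(int /*offset*/) { UNIMPLEMENTED_FATAL(); }
      virtual void CallFromThread64(int /*offset*/) { UNIMPLEMENTED_FATAL(); }
    };

    class X86Assembler : public Assembler {
     public:
      void CallFromThread32(int offset) override {
        std::cout << "call fs:[" << offset << "]\n";
      }
      // CallFromThread64 intentionally not overridden on a 32-bit target.
    };

    int main() {
      X86Assembler assembler;
      assembler.CallFromThread32(64);     // fine
      // assembler.CallFromThread64(64);  // would abort via the default stub
    }

Pure virtuals on both widths would work too, but would force every 32-bit backend to write dead 64-bit bodies.
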
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index 80aa630..89f7947 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -24,6 +24,7 @@
#include "arm/constants_arm.h"
#include "base/arena_allocator.h"
#include "base/arena_object.h"
+#include "base/enums.h"
#include "base/logging.h"
#include "base/macros.h"
#include "debug/dwarf/debug_frame_opcode_writer.h"
@@ -382,8 +383,7 @@
const ManagedRegisterEntrySpills& entry_spills) = 0;
// Emit code that will remove an activation from the stack
- virtual void RemoveFrame(size_t frame_size,
- ArrayRef<const ManagedRegister> callee_save_regs) = 0;
+ virtual void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs) = 0;
virtual void IncreaseFrameSize(size_t adjust) = 0;
virtual void DecreaseFrameSize(size_t adjust) = 0;
@@ -393,23 +393,24 @@
virtual void StoreRef(FrameOffset dest, ManagedRegister src) = 0;
virtual void StoreRawPtr(FrameOffset dest, ManagedRegister src) = 0;
- virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
- ManagedRegister scratch) = 0;
+ virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) = 0;
- virtual void StoreImmediateToThread32(ThreadOffset<4> dest, uint32_t imm,
+ virtual void StoreImmediateToThread32(ThreadOffset32 dest,
+ uint32_t imm,
ManagedRegister scratch);
- virtual void StoreImmediateToThread64(ThreadOffset<8> dest, uint32_t imm,
+ virtual void StoreImmediateToThread64(ThreadOffset64 dest,
+ uint32_t imm,
ManagedRegister scratch);
- virtual void StoreStackOffsetToThread32(ThreadOffset<4> thr_offs,
+ virtual void StoreStackOffsetToThread32(ThreadOffset32 thr_offs,
FrameOffset fr_offs,
ManagedRegister scratch);
- virtual void StoreStackOffsetToThread64(ThreadOffset<8> thr_offs,
+ virtual void StoreStackOffsetToThread64(ThreadOffset64 thr_offs,
FrameOffset fr_offs,
ManagedRegister scratch);
- virtual void StoreStackPointerToThread32(ThreadOffset<4> thr_offs);
- virtual void StoreStackPointerToThread64(ThreadOffset<8> thr_offs);
+ virtual void StoreStackPointerToThread32(ThreadOffset32 thr_offs);
+ virtual void StoreStackPointerToThread64(ThreadOffset64 thr_offs);
virtual void StoreSpanning(FrameOffset dest, ManagedRegister src,
FrameOffset in_off, ManagedRegister scratch) = 0;
@@ -417,8 +418,8 @@
// Load routines
virtual void Load(ManagedRegister dest, FrameOffset src, size_t size) = 0;
- virtual void LoadFromThread32(ManagedRegister dest, ThreadOffset<4> src, size_t size);
- virtual void LoadFromThread64(ManagedRegister dest, ThreadOffset<8> src, size_t size);
+ virtual void LoadFromThread32(ManagedRegister dest, ThreadOffset32 src, size_t size);
+ virtual void LoadFromThread64(ManagedRegister dest, ThreadOffset64 src, size_t size);
virtual void LoadRef(ManagedRegister dest, FrameOffset src) = 0;
// If unpoison_reference is true and kPoisonReference is true, then we negate the read reference.
@@ -427,24 +428,27 @@
virtual void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) = 0;
- virtual void LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset<4> offs);
- virtual void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset<8> offs);
+ virtual void LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset32 offs);
+ virtual void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset64 offs);
// Copying routines
virtual void Move(ManagedRegister dest, ManagedRegister src, size_t size) = 0;
- virtual void CopyRawPtrFromThread32(FrameOffset fr_offs, ThreadOffset<4> thr_offs,
+ virtual void CopyRawPtrFromThread32(FrameOffset fr_offs,
+ ThreadOffset32 thr_offs,
ManagedRegister scratch);
- virtual void CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset<8> thr_offs,
+ virtual void CopyRawPtrFromThread64(FrameOffset fr_offs,
+ ThreadOffset64 thr_offs,
ManagedRegister scratch);
- virtual void CopyRawPtrToThread32(ThreadOffset<4> thr_offs, FrameOffset fr_offs,
+ virtual void CopyRawPtrToThread32(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
ManagedRegister scratch);
- virtual void CopyRawPtrToThread64(ThreadOffset<8> thr_offs, FrameOffset fr_offs,
+ virtual void CopyRawPtrToThread64(ThreadOffset64 thr_offs,
+ FrameOffset fr_offs,
ManagedRegister scratch);
- virtual void CopyRef(FrameOffset dest, FrameOffset src,
- ManagedRegister scratch) = 0;
+ virtual void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) = 0;
virtual void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) = 0;
@@ -474,24 +478,26 @@
// Exploit fast access in managed code to Thread::Current()
virtual void GetCurrentThread(ManagedRegister tr) = 0;
- virtual void GetCurrentThread(FrameOffset dest_offset,
- ManagedRegister scratch) = 0;
+ virtual void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) = 0;
// Set up out_reg to hold an Object** into the handle scope, or to be null if the
// value is null and null_allowed. in_reg holds a possibly stale reference
// that can be used to avoid loading the handle scope entry to see if the value is
// null.
- virtual void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
- ManagedRegister in_reg, bool null_allowed) = 0;
+ virtual void CreateHandleScopeEntry(ManagedRegister out_reg,
+ FrameOffset handlescope_offset,
+ ManagedRegister in_reg,
+ bool null_allowed) = 0;
// Set up out_off to hold an Object** into the handle scope, or to be null if the
// value is null and null_allowed.
- virtual void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
- ManagedRegister scratch, bool null_allowed) = 0;
+ virtual void CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handlescope_offset,
+ ManagedRegister scratch,
+ bool null_allowed) = 0;
// src holds a handle scope entry (Object**); load this into dst.
- virtual void LoadReferenceFromHandleScope(ManagedRegister dst,
- ManagedRegister src) = 0;
+ virtual void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) = 0;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
// know that src may not be null.
@@ -499,12 +505,10 @@
virtual void VerifyObject(FrameOffset src, bool could_be_null) = 0;
// Call to address held at [base+offset]
- virtual void Call(ManagedRegister base, Offset offset,
- ManagedRegister scratch) = 0;
- virtual void Call(FrameOffset base, Offset offset,
- ManagedRegister scratch) = 0;
- virtual void CallFromThread32(ThreadOffset<4> offset, ManagedRegister scratch);
- virtual void CallFromThread64(ThreadOffset<8> offset, ManagedRegister scratch);
+ virtual void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) = 0;
+ virtual void Call(FrameOffset base, Offset offset, ManagedRegister scratch) = 0;
+ virtual void CallFromThread32(ThreadOffset32 offset, ManagedRegister scratch);
+ virtual void CallFromThread64(ThreadOffset64 offset, ManagedRegister scratch);
// Generate code to check if Thread::Current()->exception_ is non-null
// and branch to an ExceptionSlowPath if it is.
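
Callers that are generic over the target width still pick between the paired 32/64 entry points by hand; the enum only makes the offset argument's width explicit. A hedged sketch of what such a call site can look like (EmitThreadCall and its offset arithmetic are illustrative, not ART helpers):

    #include <cstddef>
    #include <iostream>

    enum class PointerSize : size_t { k32 = 4, k64 = 8 };

    struct Assembler {
      void CallFromThread32(int offs) {
        std::cout << "32-bit call, offset " << offs << "\n";
      }
      void CallFromThread64(int offs) {
        std::cout << "64-bit call, offset " << offs << "\n";
      }
    };

    // Width-generic call site: the cast keeps every width-to-byte-count
    // conversion explicit and easy to grep for.
    void EmitThreadCall(Assembler* assembler, PointerSize size, int entrypoint_index) {
      const int offset = entrypoint_index * static_cast<int>(size);
      if (size == PointerSize::k64) {
        assembler->CallFromThread64(offset);
      } else {
        assembler->CallFromThread32(offset);
      }
    }

    int main() {
      Assembler assembler;
      EmitThreadCall(&assembler, PointerSize::k32, 3);  // offset 12
      EmitThreadCall(&assembler, PointerSize::k64, 3);  // offset 24
    }
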
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index 608b3bc..e6b32de 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -26,6 +26,11 @@
namespace art {
namespace mips {
+static_assert(static_cast<size_t>(kMipsPointerSize) == kMipsWordSize,
+ "Unexpected Mips pointer size.");
+static_assert(kMipsPointerSize == PointerSize::k32, "Unexpected Mips pointer size.");
+
+
std::ostream& operator<<(std::ostream& os, const DRegister& rhs) {
if (rhs >= D0 && rhs < kNumberOfDRegisters) {
os << "d" << static_cast<int>(rhs);
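
The two static_asserts just added illustrate the main friction of the enum: PointerSize no longer converts to integers implicitly, so comparisons against byte counts need an explicit cast, while enum-to-enum comparisons remain cast-free. A compilable illustration (kMipsWordSize's value is restated here as an assumption):

    #include <cstddef>

    enum class PointerSize : size_t { k32 = 4, k64 = 8 };

    constexpr PointerSize kMipsPointerSize = PointerSize::k32;
    constexpr size_t kMipsWordSize = 4;  // assumption: one MIPS word is 4 bytes

    // Integer comparison requires an explicit cast...
    static_assert(static_cast<size_t>(kMipsPointerSize) == kMipsWordSize,
                  "Unexpected Mips pointer size.");
    // ...while comparing two PointerSize values needs none.
    static_assert(kMipsPointerSize == PointerSize::k32, "Unexpected Mips pointer size.");

    // static_assert(kMipsPointerSize == kMipsWordSize, "...");  // would not compile

    int main() { return 0; }
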
@@ -2794,7 +2799,8 @@
StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
}
-void MipsAssembler::StoreImmediateToThread32(ThreadOffset<kMipsWordSize> dest, uint32_t imm,
+void MipsAssembler::StoreImmediateToThread32(ThreadOffset32 dest,
+ uint32_t imm,
ManagedRegister mscratch) {
MipsManagedRegister scratch = mscratch.AsMips();
CHECK(scratch.IsCoreRegister()) << scratch;
@@ -2803,7 +2809,7 @@
StoreToOffset(kStoreWord, scratch.AsCoreRegister(), S1, dest.Int32Value());
}
-void MipsAssembler::StoreStackOffsetToThread32(ThreadOffset<kMipsWordSize> thr_offs,
+void MipsAssembler::StoreStackOffsetToThread32(ThreadOffset32 thr_offs,
FrameOffset fr_offs,
ManagedRegister mscratch) {
MipsManagedRegister scratch = mscratch.AsMips();
@@ -2813,7 +2819,7 @@
S1, thr_offs.Int32Value());
}
-void MipsAssembler::StoreStackPointerToThread32(ThreadOffset<kMipsWordSize> thr_offs) {
+void MipsAssembler::StoreStackPointerToThread32(ThreadOffset32 thr_offs) {
StoreToOffset(kStoreWord, SP, S1, thr_offs.Int32Value());
}
@@ -2830,8 +2836,7 @@
return EmitLoad(mdest, SP, src.Int32Value(), size);
}
-void MipsAssembler::LoadFromThread32(ManagedRegister mdest,
- ThreadOffset<kMipsWordSize> src, size_t size) {
+void MipsAssembler::LoadFromThread32(ManagedRegister mdest, ThreadOffset32 src, size_t size) {
return EmitLoad(mdest, S1, src.Int32Value(), size);
}
@@ -2859,8 +2864,7 @@
base.AsMips().AsCoreRegister(), offs.Int32Value());
}
-void MipsAssembler::LoadRawPtrFromThread32(ManagedRegister mdest,
- ThreadOffset<kMipsWordSize> offs) {
+void MipsAssembler::LoadRawPtrFromThread32(ManagedRegister mdest, ThreadOffset32 offs) {
MipsManagedRegister dest = mdest.AsMips();
CHECK(dest.IsCoreRegister());
LoadFromOffset(kLoadWord, dest.AsCoreRegister(), S1, offs.Int32Value());
@@ -2915,7 +2919,7 @@
}
void MipsAssembler::CopyRawPtrFromThread32(FrameOffset fr_offs,
- ThreadOffset<kMipsWordSize> thr_offs,
+ ThreadOffset32 thr_offs,
ManagedRegister mscratch) {
MipsManagedRegister scratch = mscratch.AsMips();
CHECK(scratch.IsCoreRegister()) << scratch;
@@ -2925,7 +2929,7 @@
SP, fr_offs.Int32Value());
}
-void MipsAssembler::CopyRawPtrToThread32(ThreadOffset<kMipsWordSize> thr_offs,
+void MipsAssembler::CopyRawPtrToThread32(ThreadOffset32 thr_offs,
FrameOffset fr_offs,
ManagedRegister mscratch) {
MipsManagedRegister scratch = mscratch.AsMips();
@@ -3099,7 +3103,7 @@
// TODO: place reference map on call.
}
-void MipsAssembler::CallFromThread32(ThreadOffset<kMipsWordSize> offset ATTRIBUTE_UNUSED,
+void MipsAssembler::CallFromThread32(ThreadOffset32 offset ATTRIBUTE_UNUSED,
ManagedRegister mscratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL) << "no mips implementation";
}
@@ -3117,7 +3121,7 @@
MipsManagedRegister scratch = mscratch.AsMips();
exception_blocks_.emplace_back(scratch, stack_adjust);
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
- S1, Thread::ExceptionOffset<kMipsWordSize>().Int32Value());
+ S1, Thread::ExceptionOffset<kMipsPointerSize>().Int32Value());
// TODO: on MIPS32R6 prefer Bnezc(scratch.AsCoreRegister(), slow.Entry());
// as the NAL instruction (occurring in long R2 branches) may become deprecated.
// For now, use instructions common to R2 and R6, as this code must execute on both.
@@ -3135,7 +3139,7 @@
Move(A0, exception->scratch_.AsCoreRegister());
// Set up call to Thread::Current()->pDeliverException.
LoadFromOffset(kLoadWord, T9, S1,
- QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, pDeliverException).Int32Value());
+ QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, pDeliverException).Int32Value());
Jr(T9);
Nop();
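
Note that QUICK_ENTRYPOINT_OFFSET now takes the PointerSize constant kMipsPointerSize rather than the byte count kMipsWordSize. One way such a macro can forward a PointerSize to a templated offset helper, sketched with invented table bases (the macro body and numbers are assumptions, not ART's definition):

    #include <cstddef>
    #include <cstdint>
    #include <iostream>

    enum class PointerSize : size_t { k32 = 4, k64 = 8 };

    struct QuickEntryPoints {
      void* pNewEmptyString;
      void* pDeliverException;
    };

    // Assumed shape: the entry-point table sits at a width-dependent base
    // inside Thread, so the thread-relative offset is base + member offset.
    // The bases (80/160) are made up for the example.
    template <PointerSize kPointerSize>
    constexpr uint32_t QuickEntryPointOffset(size_t member_offset) {
      return ((kPointerSize == PointerSize::k64) ? 160u : 80u)
             + static_cast<uint32_t>(member_offset);
    }

    #define QUICK_ENTRYPOINT_OFFSET(ptr_size, x) \
      QuickEntryPointOffset<ptr_size>(offsetof(QuickEntryPoints, x))

    int main() {
      constexpr PointerSize kMipsPointerSize = PointerSize::k32;
      std::cout << QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, pDeliverException)
                << "\n";  // 80 + offsetof(QuickEntryPoints, pDeliverException)
    }
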
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 8367e68..852ced6 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -500,15 +500,15 @@
void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister mscratch) OVERRIDE;
- void StoreImmediateToThread32(ThreadOffset<kMipsWordSize> dest,
+ void StoreImmediateToThread32(ThreadOffset32 dest,
uint32_t imm,
ManagedRegister mscratch) OVERRIDE;
- void StoreStackOffsetToThread32(ThreadOffset<kMipsWordSize> thr_offs,
+ void StoreStackOffsetToThread32(ThreadOffset32 thr_offs,
FrameOffset fr_offs,
ManagedRegister mscratch) OVERRIDE;
- void StoreStackPointerToThread32(ThreadOffset<kMipsWordSize> thr_offs) OVERRIDE;
+ void StoreStackPointerToThread32(ThreadOffset32 thr_offs) OVERRIDE;
void StoreSpanning(FrameOffset dest,
ManagedRegister msrc,
@@ -518,9 +518,7 @@
// Load routines.
void Load(ManagedRegister mdest, FrameOffset src, size_t size) OVERRIDE;
- void LoadFromThread32(ManagedRegister mdest,
- ThreadOffset<kMipsWordSize> src,
- size_t size) OVERRIDE;
+ void LoadFromThread32(ManagedRegister mdest, ThreadOffset32 src, size_t size) OVERRIDE;
void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
@@ -531,16 +529,16 @@
void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) OVERRIDE;
- void LoadRawPtrFromThread32(ManagedRegister mdest, ThreadOffset<kMipsWordSize> offs) OVERRIDE;
+ void LoadRawPtrFromThread32(ManagedRegister mdest, ThreadOffset32 offs) OVERRIDE;
// Copying routines.
void Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) OVERRIDE;
void CopyRawPtrFromThread32(FrameOffset fr_offs,
- ThreadOffset<kMipsWordSize> thr_offs,
+ ThreadOffset32 thr_offs,
ManagedRegister mscratch) OVERRIDE;
- void CopyRawPtrToThread32(ThreadOffset<kMipsWordSize> thr_offs,
+ void CopyRawPtrToThread32(ThreadOffset32 thr_offs,
FrameOffset fr_offs,
ManagedRegister mscratch) OVERRIDE;
@@ -619,7 +617,7 @@
// Call to address held at [base+offset].
void Call(ManagedRegister base, Offset offset, ManagedRegister mscratch) OVERRIDE;
void Call(FrameOffset base, Offset offset, ManagedRegister mscratch) OVERRIDE;
- void CallFromThread32(ThreadOffset<kMipsWordSize> offset, ManagedRegister mscratch) OVERRIDE;
+ void CallFromThread32(ThreadOffset32 offset, ManagedRegister mscratch) OVERRIDE;
// Generate code to check if Thread::Current()->exception_ is non-null
// and branch to an ExceptionSlowPath if it is.
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index 447ede5..3fd77a0 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -26,6 +26,11 @@
namespace art {
namespace mips64 {
+static_assert(static_cast<size_t>(kMips64PointerSize) == kMips64DoublewordSize,
+ "Unexpected Mips64 pointer size.");
+static_assert(kMips64PointerSize == PointerSize::k64, "Unexpected Mips64 pointer size.");
+
+
void Mips64Assembler::FinalizeCode() {
for (auto& exception_block : exception_blocks_) {
EmitExceptionPoll(&exception_block);
@@ -2110,7 +2115,7 @@
StoreToOffset(kStoreWord, scratch.AsGpuRegister(), SP, dest.Int32Value());
}
-void Mips64Assembler::StoreStackOffsetToThread64(ThreadOffset<kMips64DoublewordSize> thr_offs,
+void Mips64Assembler::StoreStackOffsetToThread64(ThreadOffset64 thr_offs,
FrameOffset fr_offs,
ManagedRegister mscratch) {
Mips64ManagedRegister scratch = mscratch.AsMips64();
@@ -2119,7 +2124,7 @@
StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), S1, thr_offs.Int32Value());
}
-void Mips64Assembler::StoreStackPointerToThread64(ThreadOffset<kMips64DoublewordSize> thr_offs) {
+void Mips64Assembler::StoreStackPointerToThread64(ThreadOffset64 thr_offs) {
StoreToOffset(kStoreDoubleword, SP, S1, thr_offs.Int32Value());
}
@@ -2136,9 +2141,7 @@
return EmitLoad(mdest, SP, src.Int32Value(), size);
}
-void Mips64Assembler::LoadFromThread64(ManagedRegister mdest,
- ThreadOffset<kMips64DoublewordSize> src,
- size_t size) {
+void Mips64Assembler::LoadFromThread64(ManagedRegister mdest, ThreadOffset64 src, size_t size) {
return EmitLoad(mdest, S1, src.Int32Value(), size);
}
@@ -2171,8 +2174,7 @@
base.AsMips64().AsGpuRegister(), offs.Int32Value());
}
-void Mips64Assembler::LoadRawPtrFromThread64(ManagedRegister mdest,
- ThreadOffset<kMips64DoublewordSize> offs) {
+void Mips64Assembler::LoadRawPtrFromThread64(ManagedRegister mdest, ThreadOffset64 offs) {
Mips64ManagedRegister dest = mdest.AsMips64();
CHECK(dest.IsGpuRegister());
LoadFromOffset(kLoadDoubleword, dest.AsGpuRegister(), S1, offs.Int32Value());
@@ -2217,7 +2219,7 @@
}
void Mips64Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs,
- ThreadOffset<kMips64DoublewordSize> thr_offs,
+ ThreadOffset64 thr_offs,
ManagedRegister mscratch) {
Mips64ManagedRegister scratch = mscratch.AsMips64();
CHECK(scratch.IsGpuRegister()) << scratch;
@@ -2225,7 +2227,7 @@
StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, fr_offs.Int32Value());
}
-void Mips64Assembler::CopyRawPtrToThread64(ThreadOffset<kMips64DoublewordSize> thr_offs,
+void Mips64Assembler::CopyRawPtrToThread64(ThreadOffset64 thr_offs,
FrameOffset fr_offs,
ManagedRegister mscratch) {
Mips64ManagedRegister scratch = mscratch.AsMips64();
@@ -2429,7 +2431,7 @@
// TODO: place reference map on call
}
-void Mips64Assembler::CallFromThread64(ThreadOffset<kMips64DoublewordSize> offset ATTRIBUTE_UNUSED,
+void Mips64Assembler::CallFromThread64(ThreadOffset64 offset ATTRIBUTE_UNUSED,
ManagedRegister mscratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL) << "No MIPS64 implementation";
}
@@ -2449,7 +2451,7 @@
LoadFromOffset(kLoadDoubleword,
scratch.AsGpuRegister(),
S1,
- Thread::ExceptionOffset<kMips64DoublewordSize>().Int32Value());
+ Thread::ExceptionOffset<kMips64PointerSize>().Int32Value());
Bnezc(scratch.AsGpuRegister(), exception_blocks_.back().Entry());
}
@@ -2466,7 +2468,7 @@
LoadFromOffset(kLoadDoubleword,
T9,
S1,
- QUICK_ENTRYPOINT_OFFSET(kMips64DoublewordSize, pDeliverException).Int32Value());
+ QUICK_ENTRYPOINT_OFFSET(kMips64PointerSize, pDeliverException).Int32Value());
Jr(T9);
Nop();
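
Thread::ExceptionOffset is re-keyed the same way: its template argument becomes a PointerSize constant such as kMips64PointerSize instead of the raw kMips64DoublewordSize. A hedged sketch of the getter shape (the TLS layout and the factor of four are invented; only the signature style follows the diff):

    #include <cstddef>
    #include <cstdint>
    #include <iostream>

    enum class PointerSize : size_t { k32 = 4, k64 = 8 };

    template <PointerSize kPointerSize>
    class ThreadOffset {
     public:
      constexpr explicit ThreadOffset(size_t val) : val_(val) {}
      constexpr int32_t Int32Value() const { return static_cast<int32_t>(val_); }
     private:
      size_t val_;
    };

    class Thread {
     public:
      // Assumed layout: pretend four pointer-sized TLS slots precede
      // exception_, so the offset scales with the target's pointer width.
      template <PointerSize kPointerSize>
      static constexpr ThreadOffset<kPointerSize> ExceptionOffset() {
        return ThreadOffset<kPointerSize>(4 * static_cast<size_t>(kPointerSize));
      }
    };

    constexpr PointerSize kMips64PointerSize = PointerSize::k64;

    int main() {
      std::cout << Thread::ExceptionOffset<kMips64PointerSize>().Int32Value()
                << "\n";  // prints 32 on the assumed layout
    }
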
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index 0cd0708..1ad05b0 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -383,10 +383,11 @@
void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister mscratch) OVERRIDE;
- void StoreStackOffsetToThread64(ThreadOffset<kMips64DoublewordSize> thr_offs, FrameOffset fr_offs,
+ void StoreStackOffsetToThread64(ThreadOffset64 thr_offs,
+ FrameOffset fr_offs,
ManagedRegister mscratch) OVERRIDE;
- void StoreStackPointerToThread64(ThreadOffset<kMips64DoublewordSize> thr_offs) OVERRIDE;
+ void StoreStackPointerToThread64(ThreadOffset64 thr_offs) OVERRIDE;
void StoreSpanning(FrameOffset dest, ManagedRegister msrc, FrameOffset in_off,
ManagedRegister mscratch) OVERRIDE;
@@ -394,9 +395,7 @@
// Load routines.
void Load(ManagedRegister mdest, FrameOffset src, size_t size) OVERRIDE;
- void LoadFromThread64(ManagedRegister mdest,
- ThreadOffset<kMips64DoublewordSize> src,
- size_t size) OVERRIDE;
+ void LoadFromThread64(ManagedRegister mdest, ThreadOffset64 src, size_t size) OVERRIDE;
void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
@@ -405,16 +404,17 @@
void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) OVERRIDE;
- void LoadRawPtrFromThread64(ManagedRegister mdest,
- ThreadOffset<kMips64DoublewordSize> offs) OVERRIDE;
+ void LoadRawPtrFromThread64(ManagedRegister mdest, ThreadOffset64 offs) OVERRIDE;
// Copying routines.
void Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) OVERRIDE;
- void CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset<kMips64DoublewordSize> thr_offs,
+ void CopyRawPtrFromThread64(FrameOffset fr_offs,
+ ThreadOffset64 thr_offs,
ManagedRegister mscratch) OVERRIDE;
- void CopyRawPtrToThread64(ThreadOffset<kMips64DoublewordSize> thr_offs, FrameOffset fr_offs,
+ void CopyRawPtrToThread64(ThreadOffset64 thr_offs,
+ FrameOffset fr_offs,
ManagedRegister mscratch) OVERRIDE;
void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) OVERRIDE;
@@ -471,8 +471,7 @@
// Call to address held at [base+offset].
void Call(ManagedRegister base, Offset offset, ManagedRegister mscratch) OVERRIDE;
void Call(FrameOffset base, Offset offset, ManagedRegister mscratch) OVERRIDE;
- void CallFromThread64(ThreadOffset<kMips64DoublewordSize> offset,
- ManagedRegister mscratch) OVERRIDE;
+ void CallFromThread64(ThreadOffset64 offset, ManagedRegister mscratch) OVERRIDE;
// Generate code to check if Thread::Current()->exception_ is non-null
// and branch to an ExceptionSlowPath if it is.
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index f931d75..87f5647 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -2051,21 +2051,20 @@
movl(Address(ESP, dest), Immediate(imm));
}
-void X86Assembler::StoreImmediateToThread32(ThreadOffset<4> dest, uint32_t imm,
- ManagedRegister) {
+void X86Assembler::StoreImmediateToThread32(ThreadOffset32 dest, uint32_t imm, ManagedRegister) {
fs()->movl(Address::Absolute(dest), Immediate(imm));
}
-void X86Assembler::StoreStackOffsetToThread32(ThreadOffset<4> thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) {
+void X86Assembler::StoreStackOffsetToThread32(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
X86ManagedRegister scratch = mscratch.AsX86();
CHECK(scratch.IsCpuRegister());
leal(scratch.AsCpuRegister(), Address(ESP, fr_offs));
fs()->movl(Address::Absolute(thr_offs), scratch.AsCpuRegister());
}
-void X86Assembler::StoreStackPointerToThread32(ThreadOffset<4> thr_offs) {
+void X86Assembler::StoreStackPointerToThread32(ThreadOffset32 thr_offs) {
fs()->movl(Address::Absolute(thr_offs), ESP);
}
@@ -2101,7 +2100,7 @@
}
}
-void X86Assembler::LoadFromThread32(ManagedRegister mdest, ThreadOffset<4> src, size_t size) {
+void X86Assembler::LoadFromThread32(ManagedRegister mdest, ThreadOffset32 src, size_t size) {
X86ManagedRegister dest = mdest.AsX86();
if (dest.IsNoRegister()) {
CHECK_EQ(0u, size);
@@ -2111,7 +2110,7 @@
} else if (dest.IsRegisterPair()) {
CHECK_EQ(8u, size);
fs()->movl(dest.AsRegisterPairLow(), Address::Absolute(src));
- fs()->movl(dest.AsRegisterPairHigh(), Address::Absolute(ThreadOffset<4>(src.Int32Value()+4)));
+ fs()->movl(dest.AsRegisterPairHigh(), Address::Absolute(ThreadOffset32(src.Int32Value()+4)));
} else if (dest.IsX87Register()) {
if (size == 4) {
fs()->flds(Address::Absolute(src));
@@ -2152,7 +2151,7 @@
}
void X86Assembler::LoadRawPtrFromThread32(ManagedRegister mdest,
- ThreadOffset<4> offs) {
+ ThreadOffset32 offs) {
X86ManagedRegister dest = mdest.AsX86();
CHECK(dest.IsCpuRegister());
fs()->movl(dest.AsCpuRegister(), Address::Absolute(offs));
@@ -2215,17 +2214,17 @@
}
void X86Assembler::CopyRawPtrFromThread32(FrameOffset fr_offs,
- ThreadOffset<4> thr_offs,
- ManagedRegister mscratch) {
+ ThreadOffset32 thr_offs,
+ ManagedRegister mscratch) {
X86ManagedRegister scratch = mscratch.AsX86();
CHECK(scratch.IsCpuRegister());
fs()->movl(scratch.AsCpuRegister(), Address::Absolute(thr_offs));
Store(fr_offs, scratch, 4);
}
-void X86Assembler::CopyRawPtrToThread32(ThreadOffset<4> thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) {
+void X86Assembler::CopyRawPtrToThread32(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
X86ManagedRegister scratch = mscratch.AsX86();
CHECK(scratch.IsCpuRegister());
Load(scratch, fr_offs, 4);
@@ -2371,26 +2370,26 @@
call(Address(scratch, offset));
}
-void X86Assembler::CallFromThread32(ThreadOffset<4> offset, ManagedRegister /*mscratch*/) {
+void X86Assembler::CallFromThread32(ThreadOffset32 offset, ManagedRegister /*mscratch*/) {
fs()->call(Address::Absolute(offset));
}
void X86Assembler::GetCurrentThread(ManagedRegister tr) {
fs()->movl(tr.AsX86().AsCpuRegister(),
- Address::Absolute(Thread::SelfOffset<4>()));
+ Address::Absolute(Thread::SelfOffset<kX86PointerSize>()));
}
void X86Assembler::GetCurrentThread(FrameOffset offset,
ManagedRegister mscratch) {
X86ManagedRegister scratch = mscratch.AsX86();
- fs()->movl(scratch.AsCpuRegister(), Address::Absolute(Thread::SelfOffset<4>()));
+ fs()->movl(scratch.AsCpuRegister(), Address::Absolute(Thread::SelfOffset<kX86PointerSize>()));
movl(Address(ESP, offset), scratch.AsCpuRegister());
}
void X86Assembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) {
X86ExceptionSlowPath* slow = new (GetArena()) X86ExceptionSlowPath(stack_adjust);
buffer_.EnqueueSlowPath(slow);
- fs()->cmpl(Address::Absolute(Thread::ExceptionOffset<4>()), Immediate(0));
+ fs()->cmpl(Address::Absolute(Thread::ExceptionOffset<kX86PointerSize>()), Immediate(0));
j(kNotEqual, slow->Entry());
}
@@ -2403,8 +2402,8 @@
__ DecreaseFrameSize(stack_adjust_);
}
// Pass exception as argument in EAX
- __ fs()->movl(EAX, Address::Absolute(Thread::ExceptionOffset<4>()));
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(4, pDeliverException)));
+ __ fs()->movl(EAX, Address::Absolute(Thread::ExceptionOffset<kX86PointerSize>()));
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86PointerSize, pDeliverException)));
// this call should never return
__ int3();
#undef __
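
One subtle gain from keying ThreadOffset on the enum rather than on size_t, visible in this x86 file: the set of valid widths becomes closed. ThreadOffset<12> was a perfectly instantiable type before, whereas PointerSize offers only k32 and k64. A self-contained sketch (the Address struct is a stub, not ART's class):

    #include <cstddef>
    #include <cstdint>

    enum class PointerSize : size_t { k32 = 4, k64 = 8 };

    // With size_t keys, ThreadOffset<12> was a valid (if meaningless) type;
    // with an enum key, the widths form a closed set.
    template <PointerSize kPointerSize>
    class ThreadOffset {
     public:
      constexpr explicit ThreadOffset(size_t val) : val_(val) {}
      constexpr int32_t Int32Value() const { return static_cast<int32_t>(val_); }
     private:
      size_t val_;
    };
    using ThreadOffset32 = ThreadOffset<PointerSize::k32>;
    using ThreadOffset64 = ThreadOffset<PointerSize::k64>;

    // Stub of the x86 Address helper: only the 32-bit offset type is accepted.
    struct Address {
      int32_t disp;
      static Address Absolute(ThreadOffset32 addr) { return Address{addr.Int32Value()}; }
    };

    int main() {
      Address good = Address::Absolute(ThreadOffset32(8));    // fine
      // Address bad = Address::Absolute(ThreadOffset64(8));  // no matching overload
      return good.disp == 8 ? 0 : 1;
    }

As before, the two widths remain distinct types, so the mismatched overload above is still rejected at compile time.
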
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index fa61662..75648f2 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -195,7 +195,7 @@
return result;
}
- static Address Absolute(ThreadOffset<4> addr) {
+ static Address Absolute(ThreadOffset32 addr) {
return Absolute(addr.Int32Value());
}
@@ -652,13 +652,13 @@
void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
- void StoreImmediateToThread32(ThreadOffset<4> dest, uint32_t imm, ManagedRegister scratch)
+ void StoreImmediateToThread32(ThreadOffset32 dest, uint32_t imm, ManagedRegister scratch)
OVERRIDE;
- void StoreStackOffsetToThread32(ThreadOffset<4> thr_offs, FrameOffset fr_offs,
+ void StoreStackOffsetToThread32(ThreadOffset32 thr_offs, FrameOffset fr_offs,
ManagedRegister scratch) OVERRIDE;
- void StoreStackPointerToThread32(ThreadOffset<4> thr_offs) OVERRIDE;
+ void StoreStackPointerToThread32(ThreadOffset32 thr_offs) OVERRIDE;
void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
ManagedRegister scratch) OVERRIDE;
@@ -666,7 +666,7 @@
// Load routines
void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
- void LoadFromThread32(ManagedRegister dest, ThreadOffset<4> src, size_t size) OVERRIDE;
+ void LoadFromThread32(ManagedRegister dest, ThreadOffset32 src, size_t size) OVERRIDE;
void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
@@ -675,15 +675,15 @@
void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
- void LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset<4> offs) OVERRIDE;
+ void LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset32 offs) OVERRIDE;
// Copying routines
void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
- void CopyRawPtrFromThread32(FrameOffset fr_offs, ThreadOffset<4> thr_offs,
+ void CopyRawPtrFromThread32(FrameOffset fr_offs, ThreadOffset32 thr_offs,
ManagedRegister scratch) OVERRIDE;
- void CopyRawPtrToThread32(ThreadOffset<4> thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
+ void CopyRawPtrToThread32(ThreadOffset32 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
OVERRIDE;
void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
@@ -740,7 +740,7 @@
// Call to address held at [base+offset]
void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void CallFromThread32(ThreadOffset<4> offset, ManagedRegister scratch) OVERRIDE;
+ void CallFromThread32(ThreadOffset32 offset, ManagedRegister scratch) OVERRIDE;
// Generate code to check if Thread::Current()->exception_ is non-null
// and branch to an ExceptionSlowPath if it is.
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 3046710..977ce9d 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -2683,7 +2683,8 @@
}
}
- DCHECK_EQ(kX86_64PointerSize, kFramePointerSize);
+ static_assert(static_cast<size_t>(kX86_64PointerSize) == kFramePointerSize,
+ "Unexpected frame pointer size.");
movq(Address(CpuRegister(RSP), 0), method_reg.AsX86_64().AsCpuRegister());
@@ -2803,12 +2804,11 @@
movl(Address(CpuRegister(RSP), dest), Immediate(imm)); // TODO(64) movq?
}
-void X86_64Assembler::StoreImmediateToThread64(ThreadOffset<8> dest, uint32_t imm,
- ManagedRegister) {
+void X86_64Assembler::StoreImmediateToThread64(ThreadOffset64 dest, uint32_t imm, ManagedRegister) {
gs()->movl(Address::Absolute(dest, true), Immediate(imm)); // TODO(64) movq?
}
-void X86_64Assembler::StoreStackOffsetToThread64(ThreadOffset<8> thr_offs,
+void X86_64Assembler::StoreStackOffsetToThread64(ThreadOffset64 thr_offs,
FrameOffset fr_offs,
ManagedRegister mscratch) {
X86_64ManagedRegister scratch = mscratch.AsX86_64();
@@ -2817,7 +2817,7 @@
gs()->movq(Address::Absolute(thr_offs, true), scratch.AsCpuRegister());
}
-void X86_64Assembler::StoreStackPointerToThread64(ThreadOffset<8> thr_offs) {
+void X86_64Assembler::StoreStackPointerToThread64(ThreadOffset64 thr_offs) {
gs()->movq(Address::Absolute(thr_offs, true), CpuRegister(RSP));
}
@@ -2858,7 +2858,7 @@
}
}
-void X86_64Assembler::LoadFromThread64(ManagedRegister mdest, ThreadOffset<8> src, size_t size) {
+void X86_64Assembler::LoadFromThread64(ManagedRegister mdest, ThreadOffset64 src, size_t size) {
X86_64ManagedRegister dest = mdest.AsX86_64();
if (dest.IsNoRegister()) {
CHECK_EQ(0u, size);
@@ -2907,7 +2907,7 @@
movq(dest.AsCpuRegister(), Address(base.AsX86_64().AsCpuRegister(), offs));
}
-void X86_64Assembler::LoadRawPtrFromThread64(ManagedRegister mdest, ThreadOffset<8> offs) {
+void X86_64Assembler::LoadRawPtrFromThread64(ManagedRegister mdest, ThreadOffset64 offs) {
X86_64ManagedRegister dest = mdest.AsX86_64();
CHECK(dest.IsCpuRegister());
gs()->movq(dest.AsCpuRegister(), Address::Absolute(offs, true));
@@ -2969,7 +2969,7 @@
}
void X86_64Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs,
- ThreadOffset<8> thr_offs,
+ ThreadOffset64 thr_offs,
ManagedRegister mscratch) {
X86_64ManagedRegister scratch = mscratch.AsX86_64();
CHECK(scratch.IsCpuRegister());
@@ -2977,7 +2977,7 @@
Store(fr_offs, scratch, 8);
}
-void X86_64Assembler::CopyRawPtrToThread64(ThreadOffset<8> thr_offs,
+void X86_64Assembler::CopyRawPtrToThread64(ThreadOffset64 thr_offs,
FrameOffset fr_offs,
ManagedRegister mscratch) {
X86_64ManagedRegister scratch = mscratch.AsX86_64();
@@ -3130,17 +3130,19 @@
call(Address(scratch, offset));
}
-void X86_64Assembler::CallFromThread64(ThreadOffset<8> offset, ManagedRegister /*mscratch*/) {
+void X86_64Assembler::CallFromThread64(ThreadOffset64 offset, ManagedRegister /*mscratch*/) {
gs()->call(Address::Absolute(offset, true));
}
void X86_64Assembler::GetCurrentThread(ManagedRegister tr) {
- gs()->movq(tr.AsX86_64().AsCpuRegister(), Address::Absolute(Thread::SelfOffset<8>(), true));
+ gs()->movq(tr.AsX86_64().AsCpuRegister(),
+ Address::Absolute(Thread::SelfOffset<kX86_64PointerSize>(), true));
}
void X86_64Assembler::GetCurrentThread(FrameOffset offset, ManagedRegister mscratch) {
X86_64ManagedRegister scratch = mscratch.AsX86_64();
- gs()->movq(scratch.AsCpuRegister(), Address::Absolute(Thread::SelfOffset<8>(), true));
+ gs()->movq(scratch.AsCpuRegister(),
+ Address::Absolute(Thread::SelfOffset<kX86_64PointerSize>(), true));
movq(Address(CpuRegister(RSP), offset), scratch.AsCpuRegister());
}
@@ -3156,7 +3158,7 @@
void X86_64Assembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) {
X86_64ExceptionSlowPath* slow = new (GetArena()) X86_64ExceptionSlowPath(stack_adjust);
buffer_.EnqueueSlowPath(slow);
- gs()->cmpl(Address::Absolute(Thread::ExceptionOffset<8>(), true), Immediate(0));
+ gs()->cmpl(Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>(), true), Immediate(0));
j(kNotEqual, slow->Entry());
}
@@ -3169,8 +3171,10 @@
__ DecreaseFrameSize(stack_adjust_);
}
// Pass exception as argument in RDI
- __ gs()->movq(CpuRegister(RDI), Address::Absolute(Thread::ExceptionOffset<8>(), true));
- __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(8, pDeliverException), true));
+ __ gs()->movq(CpuRegister(RDI),
+ Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>(), true));
+ __ gs()->call(
+ Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64PointerSize, pDeliverException), true));
// this call should never return
__ int3();
#undef __
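
The DCHECK_EQ-to-static_assert change earlier in this file is an upgrade, not just a mechanical conversion: both operands are compile-time constants, so a mismatch now fails the build rather than a debug run, and it sidesteps needing an operator<< for PointerSize in the logging path (a presumed motivation). A compact illustration:

    #include <cassert>
    #include <cstddef>

    enum class PointerSize : size_t { k32 = 4, k64 = 8 };

    constexpr PointerSize kX86_64PointerSize = PointerSize::k64;
    constexpr size_t kFramePointerSize = 8;

    // Compile-time: fails the build on mismatch, costs nothing at runtime.
    static_assert(static_cast<size_t>(kX86_64PointerSize) == kFramePointerSize,
                  "Unexpected frame pointer size.");

    int main() {
      // Runtime equivalent (debug builds only); needs the same cast, and a
      // stream-style DCHECK_EQ would also need operator<< for the enum.
      assert(static_cast<size_t>(kX86_64PointerSize) == kFramePointerSize);
    }
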
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 361f73c..52e39cf 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -258,7 +258,7 @@
}
// If no_rip is true then the Absolute address isn't RIP relative.
- static Address Absolute(ThreadOffset<8> addr, bool no_rip = false) {
+ static Address Absolute(ThreadOffset64 addr, bool no_rip = false) {
return Absolute(addr.Int32Value(), no_rip);
}
@@ -723,13 +723,13 @@
void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
- void StoreImmediateToThread64(ThreadOffset<8> dest, uint32_t imm, ManagedRegister scratch)
+ void StoreImmediateToThread64(ThreadOffset64 dest, uint32_t imm, ManagedRegister scratch)
OVERRIDE;
- void StoreStackOffsetToThread64(ThreadOffset<8> thr_offs, FrameOffset fr_offs,
+ void StoreStackOffsetToThread64(ThreadOffset64 thr_offs, FrameOffset fr_offs,
ManagedRegister scratch) OVERRIDE;
- void StoreStackPointerToThread64(ThreadOffset<8> thr_offs) OVERRIDE;
+ void StoreStackPointerToThread64(ThreadOffset64 thr_offs) OVERRIDE;
void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
ManagedRegister scratch) OVERRIDE;
@@ -737,7 +737,7 @@
// Load routines
void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
- void LoadFromThread64(ManagedRegister dest, ThreadOffset<8> src, size_t size) OVERRIDE;
+ void LoadFromThread64(ManagedRegister dest, ThreadOffset64 src, size_t size) OVERRIDE;
void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
@@ -746,15 +746,15 @@
void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
- void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset<8> offs) OVERRIDE;
+ void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset64 offs) OVERRIDE;
// Copying routines
void Move(ManagedRegister dest, ManagedRegister src, size_t size);
- void CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset<8> thr_offs,
+ void CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset64 thr_offs,
ManagedRegister scratch) OVERRIDE;
- void CopyRawPtrToThread64(ThreadOffset<8> thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
+ void CopyRawPtrToThread64(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
OVERRIDE;
void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
@@ -812,7 +812,7 @@
// Call to address held at [base+offset]
void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void CallFromThread64(ThreadOffset<8> offset, ManagedRegister scratch) OVERRIDE;
+ void CallFromThread64(ThreadOffset64 offset, ManagedRegister scratch) OVERRIDE;
// Generate code to check if Thread::Current()->exception_ is non-null
// and branch to an ExceptionSlowPath if it is.