Object model changes to support 64-bit.
Modify the mirror objects so that references between them use an
ObjectReference value type rather than an Object*, so that functionality to
compress larger references can be captured in the ObjectReference
implementation. ObjectReferences are 32-bit, and all other aspects of object
layout remain as they currently are.
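A minimal sketch of the idea, assuming a simple truncating compression (the
real policy lives in the ObjectReference implementation; names here are
illustrative):

    #include <stdint.h>

    namespace mirror {

    class Object;

    // Value type holding a compressed 32-bit reference to a mirror object,
    // used in place of a raw MirrorType* between mirror objects.
    template<class MirrorType>
    class ObjectReference {
     public:
      MirrorType* AsMirrorPtr() const {
        return reinterpret_cast<MirrorType*>(static_cast<uintptr_t>(reference_));
      }

      void Assign(MirrorType* other) {
        reference_ = Compress(other);
      }

     private:
      static uint32_t Compress(MirrorType* mirror_ptr) {
        uintptr_t as_bits = reinterpret_cast<uintptr_t>(mirror_ptr);
        return static_cast<uint32_t>(as_bits);  // Assumes the heap fits in 32 bits.
      }

      // Always 32 bits, even on a 64-bit host, so object layout is unchanged.
      uint32_t reference_;
    };

    }  // namespace mirror

The sizeof(mirror::HeapReference<mirror::Object>) uses in the diff below rely
on this wrapper staying 32-bit rather than pointer-sized.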
Expand fields in objects holding pointers so they can hold 64-bit pointers.
It's expected the size of these will come down by improving where we hold
compiler meta-data.
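As a hedged sketch of the widened fields (names hypothetical, not the exact
ArtMethod layout), a pointer-holding member becomes a fixed 64-bit integer
behind accessors:

    #include <stdint.h>

    class MethodFieldSketch {
     public:
      const void* GetEntryPointFromQuickCompiledCode() const {
        return reinterpret_cast<const void*>(
            static_cast<uintptr_t>(entry_point_from_quick_compiled_code_));
      }

      void SetEntryPointFromQuickCompiledCode(const void* entry_point) {
        entry_point_from_quick_compiled_code_ =
            static_cast<uint64_t>(reinterpret_cast<uintptr_t>(entry_point));
      }

     private:
      // Wide enough for a 64-bit pointer; wasteful on 32-bit hosts until the
      // compiler meta-data moves elsewhere, per the note above.
      uint64_t entry_point_from_quick_compiled_code_;
    };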
Stub out x86_64 architecture specific runtime implementation.
Modify OutputStream so that reads and writes are of unsigned quantities.
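For example (interface shape illustrative, not the exact OutputStream API):
counts and offsets become unsigned so they cannot sign-extend or go negative
when mixed into 64-bit arithmetic:

    #include <stddef.h>
    #include <stdint.h>

    class OutputStream {
     public:
      virtual ~OutputStream() {}

      // Writes byte_count bytes from buffer; returns true on success.
      virtual bool WriteFully(const void* buffer, size_t byte_count) = 0;

      // Absolute seek to an unsigned offset rather than a signed one.
      virtual bool SeekTo(uint64_t offset) = 0;
    };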
Make the use of portable or quick code more explicit.
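The diff below replaces #ifdef blocks with an explicit constant and paired
Portable/Quick accessors (GetPortableToQuickBridge,
GetPortableResolutionTrampoline, and so on). The constant can be sketched as:

    // Compile-time flag instead of preprocessor conditionals, so the
    // portable/quick choice is visible at each call site and both branches
    // are type-checked on every build.
    #if defined(ART_USE_PORTABLE_COMPILER)
    static constexpr bool kUsePortableCompiler = true;
    #else
    static constexpr bool kUsePortableCompiler = false;
    #endif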
Templatize AtomicInteger to support more than just int32_t as a type.
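A sketch of the templatized form, assuming the GCC-style __sync builtins the
32-bit version already relied on:

    #include <stdint.h>

    template<typename T>
    class Atomic {
     public:
      explicit Atomic(T value) : value_(value) {}

      T Load() const { return value_; }

      // Returns true if value_ held expected and has been set to desired.
      bool CompareAndSwap(T expected, T desired) {
        return __sync_bool_compare_and_swap(&value_, expected, desired);
      }

      T FetchAndAdd(T delta) { return __sync_fetch_and_add(&value_, delta); }

     private:
      volatile T value_;
    };

    typedef Atomic<int32_t> AtomicInteger;  // Existing name kept for callers.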
Add missing annotalysis information on the mutator lock, and fix issues
relating to it.
Refactor array copy so that System.arraycopy and other uses elsewhere in the
runtime share a common implementation.
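A hypothetical shape for the shared helper (names invented for illustration):

    #include <stdint.h>
    #include <string.h>

    // One bounds-check-free primitive copy used by System.arraycopy and by
    // runtime-internal callers once their own checks have passed.
    static inline void CopyPrimitiveElements(void* dst_raw, size_t dst_pos,
                                             const void* src_raw, size_t src_pos,
                                             size_t count, size_t element_width) {
      uint8_t* dst = reinterpret_cast<uint8_t*>(dst_raw) + dst_pos * element_width;
      const uint8_t* src =
          reinterpret_cast<const uint8_t*>(src_raw) + src_pos * element_width;
      // memmove handles the overlapping self-copy case System.arraycopy allows.
      memmove(dst, src, count * element_width);
    }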
Fix numerous 64-bit build issues.
Change-Id: I1a5694c251a42c9eff71084dfdd4b51fff716822
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index f478366..20532f4 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -282,7 +282,7 @@
};
template<FindFieldType type, bool access_check>
-static inline mirror::ArtField* FindFieldFromCode(uint32_t field_idx, const mirror::ArtMethod* referrer,
+static inline mirror::ArtField* FindFieldFromCode(uint32_t field_idx, mirror::ArtMethod* referrer,
Thread* self, size_t expected_size) {
bool is_primitive;
bool is_set;
@@ -321,8 +321,7 @@
return nullptr; // Failure.
} else {
FieldHelper fh(resolved_field);
- if (UNLIKELY(fh.IsPrimitiveType() != is_primitive ||
- fh.FieldSize() != expected_size)) {
+ if (UNLIKELY(fh.IsPrimitiveType() != is_primitive || fh.FieldSize() != expected_size)) {
ThrowLocation throw_location = self->GetCurrentLocationForThrow();
DCHECK(throw_location.GetMethod() == referrer);
self->ThrowNewExceptionF(throw_location, "Ljava/lang/NoSuchFieldError;",
@@ -358,7 +357,7 @@
#define EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL(_type, _access_check) \
template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE \
mirror::ArtField* FindFieldFromCode<_type, _access_check>(uint32_t field_idx, \
- const mirror::ArtMethod* referrer, \
+ mirror::ArtMethod* referrer, \
Thread* self, size_t expected_size) \
#define EXPLICIT_FIND_FIELD_FROM_CODE_TYPED_TEMPLATE_DECL(_type) \
@@ -496,7 +495,7 @@
// Fast path field resolution that can't initialize classes or throw exceptions.
static inline mirror::ArtField* FindFieldFast(uint32_t field_idx,
- const mirror::ArtMethod* referrer,
+ mirror::ArtMethod* referrer,
FindFieldType type, size_t expected_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtField* resolved_field =
@@ -552,7 +551,7 @@
// Fast path method resolution that can't throw exceptions.
static inline mirror::ArtMethod* FindMethodFast(uint32_t method_idx,
mirror::Object* this_object,
- const mirror::ArtMethod* referrer,
+ mirror::ArtMethod* referrer,
bool access_check, InvokeType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
bool is_direct = type == kStatic || type == kDirect;
@@ -593,7 +592,7 @@
}
static inline mirror::Class* ResolveVerifyAndClinit(uint32_t type_idx,
- const mirror::ArtMethod* referrer,
+ mirror::ArtMethod* referrer,
Thread* self, bool can_run_clinit,
bool verify_access)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -630,7 +629,7 @@
extern void ThrowStackOverflowError(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-static inline mirror::String* ResolveStringFromCode(const mirror::ArtMethod* referrer,
+static inline mirror::String* ResolveStringFromCode(mirror::ArtMethod* referrer,
uint32_t string_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
@@ -719,21 +718,21 @@
return reinterpret_cast<void*>(art_portable_to_interpreter_bridge);
}
+static inline const void* GetPortableToQuickBridge() {
+ // TODO: portable to quick bridge. Bug: 8196384
+ return GetPortableToInterpreterBridge();
+}
+
extern "C" void art_quick_to_interpreter_bridge(mirror::ArtMethod*);
static inline const void* GetQuickToInterpreterBridge() {
return reinterpret_cast<void*>(art_quick_to_interpreter_bridge);
}
-// Return address of interpreter stub.
-static inline const void* GetCompiledCodeToInterpreterBridge() {
-#if defined(ART_USE_PORTABLE_COMPILER)
- return GetPortableToInterpreterBridge();
-#else
+static inline const void* GetQuickToPortableBridge() {
+ // TODO: quick to portable bridge. Bug: 8196384
return GetQuickToInterpreterBridge();
-#endif
}
-
static inline const void* GetPortableResolutionTrampoline(ClassLinker* class_linker) {
return class_linker->GetPortableResolutionTrampoline();
}
@@ -742,15 +741,6 @@
return class_linker->GetQuickResolutionTrampoline();
}
-// Return address of resolution trampoline stub for defined compiler.
-static inline const void* GetResolutionTrampoline(ClassLinker* class_linker) {
-#if defined(ART_USE_PORTABLE_COMPILER)
- return GetPortableResolutionTrampoline(class_linker);
-#else
- return GetQuickResolutionTrampoline(class_linker);
-#endif
-}
-
static inline const void* GetPortableImtConflictTrampoline(ClassLinker* class_linker) {
return class_linker->GetPortableImtConflictTrampoline();
}
@@ -759,15 +749,6 @@
return class_linker->GetQuickImtConflictTrampoline();
}
-// Return address of imt conflict trampoline stub for defined compiler.
-static inline const void* GetImtConflictTrampoline(ClassLinker* class_linker) {
-#if defined(ART_USE_PORTABLE_COMPILER)
- return GetPortableImtConflictTrampoline(class_linker);
-#else
- return GetQuickImtConflictTrampoline(class_linker);
-#endif
-}
-
extern "C" void art_portable_proxy_invoke_handler();
static inline const void* GetPortableProxyInvokeHandler() {
return reinterpret_cast<void*>(art_portable_proxy_invoke_handler);
@@ -778,14 +759,6 @@
return reinterpret_cast<void*>(art_quick_proxy_invoke_handler);
}
-static inline const void* GetProxyInvokeHandler() {
-#if defined(ART_USE_PORTABLE_COMPILER)
- return GetPortableProxyInvokeHandler();
-#else
- return GetQuickProxyInvokeHandler();
-#endif
-}
-
extern "C" void* art_jni_dlsym_lookup_stub(JNIEnv*, jobject);
static inline void* GetJniDlsymLookupStub() {
return reinterpret_cast<void*>(art_jni_dlsym_lookup_stub);
diff --git a/runtime/entrypoints/interpreter/interpreter_entrypoints.cc b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
index 0df00c2..8a2ce51 100644
--- a/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
+++ b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
@@ -45,15 +45,15 @@
}
}
uint16_t arg_offset = (code_item == NULL) ? 0 : code_item->registers_size_ - code_item->ins_size_;
-#if defined(ART_USE_PORTABLE_COMPILER)
- ArgArray arg_array(mh.GetShorty(), mh.GetShortyLength());
- arg_array.BuildArgArrayFromFrame(shadow_frame, arg_offset);
- method->Invoke(self, arg_array.GetArray(), arg_array.GetNumBytes(), result, mh.GetShorty()[0]);
-#else
- method->Invoke(self, shadow_frame->GetVRegArgs(arg_offset),
- (shadow_frame->NumberOfVRegs() - arg_offset) * sizeof(uint32_t),
- result, mh.GetShorty()[0]);
-#endif
+ if (kUsePortableCompiler) {
+ ArgArray arg_array(mh.GetShorty(), mh.GetShortyLength());
+ arg_array.BuildArgArrayFromFrame(shadow_frame, arg_offset);
+ method->Invoke(self, arg_array.GetArray(), arg_array.GetNumBytes(), result, mh.GetShorty()[0]);
+ } else {
+ method->Invoke(self, shadow_frame->GetVRegArgs(arg_offset),
+ (shadow_frame->NumberOfVRegs() - arg_offset) * sizeof(uint32_t),
+ result, mh.GetShorty()[0]);
+ }
}
} // namespace art
diff --git a/runtime/entrypoints/portable/portable_cast_entrypoints.cc b/runtime/entrypoints/portable/portable_cast_entrypoints.cc
index d343c5d..a553a22 100644
--- a/runtime/entrypoints/portable/portable_cast_entrypoints.cc
+++ b/runtime/entrypoints/portable/portable_cast_entrypoints.cc
@@ -20,16 +20,16 @@
namespace art {
-extern "C" int32_t art_portable_is_assignable_from_code(const mirror::Class* dest_type,
- const mirror::Class* src_type)
+extern "C" int32_t art_portable_is_assignable_from_code(mirror::Class* dest_type,
+ mirror::Class* src_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(dest_type != NULL);
DCHECK(src_type != NULL);
return dest_type->IsAssignableFrom(src_type) ? 1 : 0;
}
-extern "C" void art_portable_check_cast_from_code(const mirror::Class* dest_type,
- const mirror::Class* src_type)
+extern "C" void art_portable_check_cast_from_code(mirror::Class* dest_type,
+ mirror::Class* src_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(dest_type->IsClass()) << PrettyClass(dest_type);
DCHECK(src_type->IsClass()) << PrettyClass(src_type);
@@ -38,8 +38,8 @@
}
}
-extern "C" void art_portable_check_put_array_element_from_code(const mirror::Object* element,
- const mirror::Object* array)
+extern "C" void art_portable_check_put_array_element_from_code(mirror::Object* element,
+ mirror::Object* array)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (element == NULL) {
return;
diff --git a/runtime/entrypoints/portable/portable_field_entrypoints.cc b/runtime/entrypoints/portable/portable_field_entrypoints.cc
index 095e99e..0b54b9c 100644
--- a/runtime/entrypoints/portable/portable_field_entrypoints.cc
+++ b/runtime/entrypoints/portable/portable_field_entrypoints.cc
@@ -65,13 +65,13 @@
mirror::Object* new_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticObjectWrite,
- sizeof(mirror::Object*));
+ sizeof(mirror::HeapReference<mirror::Object>));
if (LIKELY(field != NULL)) {
field->SetObj(field->GetDeclaringClass(), new_value);
return 0;
}
field = FindFieldFromCode<StaticObjectWrite, true>(field_idx, referrer, Thread::Current(),
- sizeof(mirror::Object*));
+ sizeof(mirror::HeapReference<mirror::Object>));
if (LIKELY(field != NULL)) {
field->SetObj(field->GetDeclaringClass(), new_value);
return 0;
@@ -113,12 +113,12 @@
mirror::ArtMethod* referrer)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticObjectRead,
- sizeof(mirror::Object*));
+ sizeof(mirror::HeapReference<mirror::Object>));
if (LIKELY(field != NULL)) {
return field->GetObj(field->GetDeclaringClass());
}
field = FindFieldFromCode<StaticObjectRead, true>(field_idx, referrer, Thread::Current(),
- sizeof(mirror::Object*));
+ sizeof(mirror::HeapReference<mirror::Object>));
if (LIKELY(field != NULL)) {
return field->GetObj(field->GetDeclaringClass());
}
@@ -167,13 +167,13 @@
mirror::Object* new_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite,
- sizeof(mirror::Object*));
+ sizeof(mirror::HeapReference<mirror::Object>));
if (LIKELY(field != NULL)) {
field->SetObj(obj, new_value);
return 0;
}
field = FindFieldFromCode<InstanceObjectWrite, true>(field_idx, referrer, Thread::Current(),
- sizeof(mirror::Object*));
+ sizeof(mirror::HeapReference<mirror::Object>));
if (LIKELY(field != NULL)) {
field->SetObj(obj, new_value);
return 0;
@@ -218,12 +218,12 @@
mirror::Object* obj)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstanceObjectRead,
- sizeof(mirror::Object*));
+ sizeof(mirror::HeapReference<mirror::Object>));
if (LIKELY(field != NULL)) {
return field->GetObj(obj);
}
field = FindFieldFromCode<InstanceObjectRead, true>(field_idx, referrer, Thread::Current(),
- sizeof(mirror::Object*));
+ sizeof(mirror::HeapReference<mirror::Object>));
if (LIKELY(field != NULL)) {
return field->GetObj(obj);
}
diff --git a/runtime/entrypoints/portable/portable_fillarray_entrypoints.cc b/runtime/entrypoints/portable/portable_fillarray_entrypoints.cc
index 8cf4eed..1005d0e 100644
--- a/runtime/entrypoints/portable/portable_fillarray_entrypoints.cc
+++ b/runtime/entrypoints/portable/portable_fillarray_entrypoints.cc
@@ -44,7 +44,7 @@
return; // Error
}
uint32_t size_in_bytes = payload->element_count * payload->element_width;
- memcpy(array->GetRawData(payload->element_width), payload->data, size_in_bytes);
+ memcpy(array->GetRawData(payload->element_width, 0), payload->data, size_in_bytes);
}
} // namespace art
diff --git a/runtime/entrypoints/portable/portable_invoke_entrypoints.cc b/runtime/entrypoints/portable/portable_invoke_entrypoints.cc
index 47ccbb1..d34b097 100644
--- a/runtime/entrypoints/portable/portable_invoke_entrypoints.cc
+++ b/runtime/entrypoints/portable/portable_invoke_entrypoints.cc
@@ -34,7 +34,7 @@
}
}
DCHECK(!thread->IsExceptionPending());
- const void* code = method->GetEntryPointFromCompiledCode();
+ const void* code = method->GetEntryPointFromPortableCompiledCode();
// When we return, the caller will branch to this address, so it had better not be 0!
if (UNLIKELY(code == NULL)) {
diff --git a/runtime/entrypoints/portable/portable_throw_entrypoints.cc b/runtime/entrypoints/portable/portable_throw_entrypoints.cc
index 2a0df9b..1fdb832 100644
--- a/runtime/entrypoints/portable/portable_throw_entrypoints.cc
+++ b/runtime/entrypoints/portable/portable_throw_entrypoints.cc
@@ -75,7 +75,7 @@
ThrowLocation throw_location;
mirror::Throwable* exception = self->GetException(&throw_location);
// Check for special deoptimization exception.
- if (UNLIKELY(reinterpret_cast<int32_t>(exception) == -1)) {
+ if (UNLIKELY(reinterpret_cast<intptr_t>(exception) == -1)) {
return -1;
}
mirror::Class* exception_type = exception->GetClass();
diff --git a/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc b/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
index 2162dcc..55fd301 100644
--- a/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
@@ -47,6 +47,11 @@
#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 0
#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 0
#define PORTABLE_STACK_ARG_SKIP 4
+#elif defined(__x86_64__)
+// TODO: implement and check these.
+#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 16
+#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 96
+#define PORTABLE_STACK_ARG_SKIP 0
#else
#error "Unsupported architecture"
#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 0
@@ -387,43 +392,42 @@
// Incompatible class change should have been handled in resolve method.
CHECK(!called->CheckIncompatibleClassChange(invoke_type));
}
- const void* code = NULL;
+ const void* code = nullptr;
if (LIKELY(!thread->IsExceptionPending())) {
// Ensure that the called method's class is initialized.
SirtRef<mirror::Class> called_class(thread, called->GetDeclaringClass());
linker->EnsureInitialized(called_class, true, true);
if (LIKELY(called_class->IsInitialized())) {
- code = called->GetEntryPointFromCompiledCode();
+ code = called->GetEntryPointFromPortableCompiledCode();
// TODO: remove this after we solve the link issue.
- { // for lazy link.
- if (code == NULL) {
- code = linker->GetOatCodeFor(called);
- }
+ if (code == nullptr) {
+ bool have_portable_code;
+ code = linker->GetPortableOatCodeFor(called, &have_portable_code);
}
} else if (called_class->IsInitializing()) {
if (invoke_type == kStatic) {
// Class is still initializing, go to oat and grab code (trampoline must be left in place
// until class is initialized to stop races between threads).
- code = linker->GetOatCodeFor(called);
+ bool have_portable_code;
+ code = linker->GetPortableOatCodeFor(called, &have_portable_code);
} else {
// No trampoline for non-static methods.
- code = called->GetEntryPointFromCompiledCode();
+ code = called->GetEntryPointFromPortableCompiledCode();
// TODO: remove this after we solve the link issue.
- { // for lazy link.
- if (code == NULL) {
- code = linker->GetOatCodeFor(called);
- }
+ if (code == nullptr) {
+ bool have_portable_code;
+ code = linker->GetPortableOatCodeFor(called, &have_portable_code);
}
}
} else {
DCHECK(called_class->IsErroneous());
}
}
- if (LIKELY(code != NULL)) {
+ if (LIKELY(code != nullptr)) {
// Expect class to at least be initializing.
DCHECK(called->GetDeclaringClass()->IsInitializing());
// Don't want infinite recursion.
- DCHECK(code != GetResolutionTrampoline(linker));
+ DCHECK(code != GetPortableResolutionTrampoline(linker));
// Set up entry into main method
*called_addr = called;
}
diff --git a/runtime/entrypoints/quick/quick_cast_entrypoints.cc b/runtime/entrypoints/quick/quick_cast_entrypoints.cc
index ae53d6c..a6ab69b 100644
--- a/runtime/entrypoints/quick/quick_cast_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_cast_entrypoints.cc
@@ -20,8 +20,7 @@
namespace art {
// Assignable test for code, won't throw. Null and equality tests already performed
-extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass,
- const mirror::Class* ref_class)
+extern "C" uint32_t artIsAssignableFromCode(mirror::Class* klass, mirror::Class* ref_class)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(klass != NULL);
DCHECK(ref_class != NULL);
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index 003047a..ab428a5 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -26,7 +26,7 @@
namespace art {
extern "C" mirror::Class* artInitializeStaticStorageFromCode(uint32_t type_idx,
- const mirror::ArtMethod* referrer,
+ mirror::ArtMethod* referrer,
Thread* self,
mirror::ArtMethod** sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -38,7 +38,7 @@
}
extern "C" mirror::Class* artInitializeTypeFromCode(uint32_t type_idx,
- const mirror::ArtMethod* referrer,
+ mirror::ArtMethod* referrer,
Thread* self, mirror::ArtMethod** sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Called when method->dex_cache_resolved_types_[] misses.
@@ -47,7 +47,7 @@
}
extern "C" mirror::Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx,
- const mirror::ArtMethod* referrer,
+ mirror::ArtMethod* referrer,
Thread* self,
mirror::ArtMethod** sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc
index 0a533bd..93ff7aa 100644
--- a/runtime/entrypoints/quick/quick_field_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc
@@ -26,7 +26,7 @@
namespace art {
extern "C" uint32_t artGet32StaticFromCode(uint32_t field_idx,
- const mirror::ArtMethod* referrer,
+ mirror::ArtMethod* referrer,
Thread* self, mirror::ArtMethod** sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead,
@@ -43,7 +43,7 @@
}
extern "C" uint64_t artGet64StaticFromCode(uint32_t field_idx,
- const mirror::ArtMethod* referrer,
+ mirror::ArtMethod* referrer,
Thread* self, mirror::ArtMethod** sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead,
@@ -60,17 +60,17 @@
}
extern "C" mirror::Object* artGetObjStaticFromCode(uint32_t field_idx,
- const mirror::ArtMethod* referrer,
+ mirror::ArtMethod* referrer,
Thread* self, mirror::ArtMethod** sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticObjectRead,
- sizeof(mirror::Object*));
+ sizeof(mirror::HeapReference<mirror::Object>));
if (LIKELY(field != NULL)) {
return field->GetObj(field->GetDeclaringClass());
}
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
field = FindFieldFromCode<StaticObjectRead, true>(field_idx, referrer, self,
- sizeof(mirror::Object*));
+ sizeof(mirror::HeapReference<mirror::Object>));
if (LIKELY(field != NULL)) {
return field->GetObj(field->GetDeclaringClass());
}
@@ -78,7 +78,7 @@
}
extern "C" uint32_t artGet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj,
- const mirror::ArtMethod* referrer, Thread* self,
+ mirror::ArtMethod* referrer, Thread* self,
mirror::ArtMethod** sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead,
@@ -101,7 +101,7 @@
}
extern "C" uint64_t artGet64InstanceFromCode(uint32_t field_idx, mirror::Object* obj,
- const mirror::ArtMethod* referrer, Thread* self,
+ mirror::ArtMethod* referrer, Thread* self,
mirror::ArtMethod** sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead,
@@ -124,18 +124,18 @@
}
extern "C" mirror::Object* artGetObjInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
- const mirror::ArtMethod* referrer,
+ mirror::ArtMethod* referrer,
Thread* self,
mirror::ArtMethod** sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstanceObjectRead,
- sizeof(mirror::Object*));
+ sizeof(mirror::HeapReference<mirror::Object>));
if (LIKELY(field != NULL && obj != NULL)) {
return field->GetObj(obj);
}
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
field = FindFieldFromCode<InstanceObjectRead, true>(field_idx, referrer, self,
- sizeof(mirror::Object*));
+ sizeof(mirror::HeapReference<mirror::Object>));
if (LIKELY(field != NULL)) {
if (UNLIKELY(obj == NULL)) {
ThrowLocation throw_location = self->GetCurrentLocationForThrow();
@@ -148,7 +148,7 @@
}
extern "C" int artSet32StaticFromCode(uint32_t field_idx, uint32_t new_value,
- const mirror::ArtMethod* referrer, Thread* self,
+ mirror::ArtMethod* referrer, Thread* self,
mirror::ArtMethod** sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite,
@@ -166,7 +166,7 @@
return -1; // failure
}
-extern "C" int artSet64StaticFromCode(uint32_t field_idx, const mirror::ArtMethod* referrer,
+extern "C" int artSet64StaticFromCode(uint32_t field_idx, mirror::ArtMethod* referrer,
uint64_t new_value, Thread* self, mirror::ArtMethod** sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite,
@@ -185,11 +185,11 @@
}
extern "C" int artSetObjStaticFromCode(uint32_t field_idx, mirror::Object* new_value,
- const mirror::ArtMethod* referrer, Thread* self,
+ mirror::ArtMethod* referrer, Thread* self,
mirror::ArtMethod** sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticObjectWrite,
- sizeof(mirror::Object*));
+ sizeof(mirror::HeapReference<mirror::Object>));
if (LIKELY(field != NULL)) {
if (LIKELY(!FieldHelper(field).IsPrimitiveType())) {
field->SetObj(field->GetDeclaringClass(), new_value);
@@ -198,7 +198,7 @@
}
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
field = FindFieldFromCode<StaticObjectWrite, true>(field_idx, referrer, self,
- sizeof(mirror::Object*));
+ sizeof(mirror::HeapReference<mirror::Object>));
if (LIKELY(field != NULL)) {
field->SetObj(field->GetDeclaringClass(), new_value);
return 0; // success
@@ -207,7 +207,7 @@
}
extern "C" int artSet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint32_t new_value,
- const mirror::ArtMethod* referrer, Thread* self,
+ mirror::ArtMethod* referrer, Thread* self,
mirror::ArtMethod** sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
@@ -261,18 +261,18 @@
extern "C" int artSetObjInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
mirror::Object* new_value,
- const mirror::ArtMethod* referrer, Thread* self,
+ mirror::ArtMethod* referrer, Thread* self,
mirror::ArtMethod** sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite,
- sizeof(mirror::Object*));
+ sizeof(mirror::HeapReference<mirror::Object>));
if (LIKELY(field != NULL && obj != NULL)) {
field->SetObj(obj, new_value);
return 0; // success
}
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
field = FindFieldFromCode<InstanceObjectWrite, true>(field_idx, referrer, self,
- sizeof(mirror::Object*));
+ sizeof(mirror::HeapReference<mirror::Object>));
if (LIKELY(field != NULL)) {
if (UNLIKELY(obj == NULL)) {
ThrowLocation throw_location = self->GetCurrentLocationForThrow();
diff --git a/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
index ca0c92e..8dac750 100644
--- a/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
@@ -56,7 +56,7 @@
return -1; // Error
}
uint32_t size_in_bytes = payload->element_count * payload->element_width;
- memcpy(array->GetRawData(payload->element_width), payload->data, size_in_bytes);
+ memcpy(array->GetRawData(payload->element_width, 0), payload->data, size_in_bytes);
return 0; // Success
}
diff --git a/runtime/entrypoints/quick/quick_invoke_entrypoints.cc b/runtime/entrypoints/quick/quick_invoke_entrypoints.cc
index 5a1b3e8..c081768 100644
--- a/runtime/entrypoints/quick/quick_invoke_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_invoke_entrypoints.cc
@@ -124,21 +124,23 @@
return 0; // Failure.
}
}
- const void* code = method->GetEntryPointFromCompiledCode();
+ const void* code = method->GetEntryPointFromQuickCompiledCode();
-#ifndef NDEBUG
// When we return, the caller will branch to this address, so it had better not be 0!
- if (UNLIKELY(code == NULL)) {
+ if (kIsDebugBuild && UNLIKELY(code == nullptr)) {
MethodHelper mh(method);
LOG(FATAL) << "Code was NULL in method: " << PrettyMethod(method)
<< " location: " << mh.GetDexFile().GetLocation();
}
-#endif
-
+#ifdef __LP64__
+ UNIMPLEMENTED(FATAL);
+ return 0;
+#else
uint32_t method_uint = reinterpret_cast<uint32_t>(method);
uint64_t code_uint = reinterpret_cast<uint32_t>(code);
uint64_t result = ((code_uint << 32) | method_uint);
return result;
+#endif
}
template<InvokeType type, bool access_check>
@@ -156,21 +158,23 @@
}
}
DCHECK(!self->IsExceptionPending());
- const void* code = method->GetEntryPointFromCompiledCode();
+ const void* code = method->GetEntryPointFromQuickCompiledCode();
-#ifndef NDEBUG
// When we return, the caller will branch to this address, so it had better not be 0!
- if (UNLIKELY(code == NULL)) {
+ if (kIsDebugBuild && UNLIKELY(code == NULL)) {
MethodHelper mh(method);
LOG(FATAL) << "Code was NULL in method: " << PrettyMethod(method)
<< " location: " << mh.GetDexFile().GetLocation();
}
-#endif
-
+#ifdef __LP64__
+ UNIMPLEMENTED(FATAL);
+ return 0;
+#else
uint32_t method_uint = reinterpret_cast<uint32_t>(method);
uint64_t code_uint = reinterpret_cast<uint32_t>(code);
uint64_t result = ((code_uint << 32) | method_uint);
return result;
+#endif
}
// Explicit template declarations of artInvokeCommon for all invoke types.
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index b589384..9f30190 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -97,6 +97,12 @@
#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__LR_OFFSET 28
#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 32
#define QUICK_STACK_ARG_SKIP 16
+#elif defined(__x86_64__)
+// TODO: implement and check these.
+#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 8
+#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__LR_OFFSET 56
+#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 64
+#define QUICK_STACK_ARG_SKIP 32
#else
#error "Unsupported architecture"
#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 0
@@ -567,15 +573,15 @@
SirtRef<mirror::Class> called_class(soa.Self(), called->GetDeclaringClass());
linker->EnsureInitialized(called_class, true, true);
if (LIKELY(called_class->IsInitialized())) {
- code = called->GetEntryPointFromCompiledCode();
+ code = called->GetEntryPointFromQuickCompiledCode();
} else if (called_class->IsInitializing()) {
if (invoke_type == kStatic) {
// Class is still initializing, go to oat and grab code (trampoline must be left in place
// until class is initialized to stop races between threads).
- code = linker->GetOatCodeFor(called);
+ code = linker->GetQuickOatCodeFor(called);
} else {
// No trampoline for non-static methods.
- code = called->GetEntryPointFromCompiledCode();
+ code = called->GetEntryPointFromQuickCompiledCode();
}
} else {
DCHECK(called_class->IsErroneous());