ObjPtr<>-ify entrypoint utils.
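
Pass and return ObjPtr<> instead of raw mirror pointers in the
entrypoint helpers (object and array allocation checks and allocators,
proxy invocation handling, JNI monitor exit), the mterp helpers and the
interpreter string intrinsics. Entrypoints that hand a reference back
to quick code now unwrap it explicitly with .Ptr(), and
reinterpret_cast<>/down_cast<> of vreg references are replaced with
ObjPtr<>::DownCast(MakeObjPtr(...)).

For reference, a rough sketch of the pattern being applied (Lookup is a
hypothetical helper, not part of this change):

  // Before: a raw mirror pointer can silently be used stale across a
  // suspension point, e.g. after a moving GC has relocated the object.
  mirror::Object* Lookup(ShadowFrame* sf, size_t vreg)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return sf->GetVRegReference(vreg);
  }

  // After: in debug builds ObjPtr<> checks on each use that the thread
  // has not been through a suspend point since the ObjPtr was created,
  // so stale uses abort instead of reading through a moved object.
  ObjPtr<mirror::Object> Lookup(ShadowFrame* sf, size_t vreg)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return sf->GetVRegReference(vreg);
  }
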
Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing --interpreter
Bug: 31113334
Change-Id: Id05ce5b827b5c11a0fa796bca0b939e29ecf3c5f
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index a18cca4..a30eb23 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -114,9 +114,9 @@
return method;
}
-ALWAYS_INLINE inline mirror::Class* CheckObjectAlloc(mirror::Class* klass,
- Thread* self,
- bool* slow_path)
+ALWAYS_INLINE inline ObjPtr<mirror::Class> CheckObjectAlloc(ObjPtr<mirror::Class> klass,
+ Thread* self,
+ bool* slow_path)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_) {
if (UNLIKELY(!klass->IsInstantiable())) {
@@ -154,9 +154,9 @@
}
ALWAYS_INLINE
-inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass,
- Thread* self,
- bool* slow_path)
+inline ObjPtr<mirror::Class> CheckClassInitializedForObjectAlloc(ObjPtr<mirror::Class> klass,
+ Thread* self,
+ bool* slow_path)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_) {
if (UNLIKELY(!klass->IsInitialized())) {
@@ -184,9 +184,9 @@
// or IllegalAccessError if klass is j.l.Class. Performs a clinit check too.
template <bool kInstrumented>
ALWAYS_INLINE
-inline mirror::Object* AllocObjectFromCode(mirror::Class* klass,
- Thread* self,
- gc::AllocatorType allocator_type) {
+inline ObjPtr<mirror::Object> AllocObjectFromCode(ObjPtr<mirror::Class> klass,
+ Thread* self,
+ gc::AllocatorType allocator_type) {
bool slow_path = false;
klass = CheckObjectAlloc(klass, self, &slow_path);
if (UNLIKELY(slow_path)) {
@@ -196,18 +196,18 @@
// CheckObjectAlloc can cause thread suspension which means we may now be instrumented.
return klass->Alloc</*kInstrumented=*/true>(
self,
- Runtime::Current()->GetHeap()->GetCurrentAllocator()).Ptr();
+ Runtime::Current()->GetHeap()->GetCurrentAllocator());
}
DCHECK(klass != nullptr);
- return klass->Alloc<kInstrumented>(self, allocator_type).Ptr();
+ return klass->Alloc<kInstrumented>(self, allocator_type);
}
// Given the context of a calling Method and a resolved class, create an instance.
template <bool kInstrumented>
ALWAYS_INLINE
-inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass,
- Thread* self,
- gc::AllocatorType allocator_type) {
+inline ObjPtr<mirror::Object> AllocObjectFromCodeResolved(ObjPtr<mirror::Class> klass,
+ Thread* self,
+ gc::AllocatorType allocator_type) {
DCHECK(klass != nullptr);
bool slow_path = false;
klass = CheckClassInitializedForObjectAlloc(klass, self, &slow_path);
@@ -228,9 +228,9 @@
// Given the context of a calling Method and an initialized class, create an instance.
template <bool kInstrumented>
ALWAYS_INLINE
-inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Class* klass,
- Thread* self,
- gc::AllocatorType allocator_type) {
+inline ObjPtr<mirror::Object> AllocObjectFromCodeInitialized(ObjPtr<mirror::Class> klass,
+ Thread* self,
+ gc::AllocatorType allocator_type) {
DCHECK(klass != nullptr);
// Pass in false since the object cannot be finalizable.
- return klass->Alloc<kInstrumented, false>(self, allocator_type).Ptr();
+ return klass->Alloc<kInstrumented, false>(self, allocator_type);
@@ -239,10 +239,10 @@
template <bool kAccessCheck>
ALWAYS_INLINE
-inline mirror::Class* CheckArrayAlloc(dex::TypeIndex type_idx,
- int32_t component_count,
- ArtMethod* method,
- bool* slow_path) {
+inline ObjPtr<mirror::Class> CheckArrayAlloc(dex::TypeIndex type_idx,
+ int32_t component_count,
+ ArtMethod* method,
+ bool* slow_path) {
if (UNLIKELY(component_count < 0)) {
ThrowNegativeArraySizeException(component_count);
*slow_path = true;
@@ -267,7 +267,7 @@
return nullptr; // Failure
}
}
- return klass.Ptr();
+ return klass;
}
// Given the context of a calling Method, use its DexCache to resolve a type to an array Class. If
@@ -302,10 +302,10 @@
template <bool kInstrumented>
ALWAYS_INLINE
-inline mirror::Array* AllocArrayFromCodeResolved(mirror::Class* klass,
- int32_t component_count,
- Thread* self,
- gc::AllocatorType allocator_type) {
+inline ObjPtr<mirror::Array> AllocArrayFromCodeResolved(ObjPtr<mirror::Class> klass,
+ int32_t component_count,
+ Thread* self,
+ gc::AllocatorType allocator_type) {
DCHECK(klass != nullptr);
if (UNLIKELY(component_count < 0)) {
ThrowNegativeArraySizeException(component_count);
@@ -743,7 +743,7 @@
inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self) {
// Save any pending exception over monitor exit call.
- mirror::Throwable* saved_exception = nullptr;
+ ObjPtr<mirror::Throwable> saved_exception = nullptr;
if (UNLIKELY(self->IsExceptionPending())) {
saved_exception = self->GetException();
self->ClearException();
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 19498f3..ad97c31 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -56,8 +56,10 @@
}
}
-JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, const char* shorty,
- jobject rcvr_jobj, jobject interface_method_jobj,
+JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa,
+ const char* shorty,
+ jobject rcvr_jobj,
+ jobject interface_method_jobj,
std::vector<jvalue>& args) {
DCHECK(soa.Env()->IsInstanceOf(rcvr_jobj, WellKnownClasses::java_lang_reflect_Proxy));
@@ -80,7 +82,7 @@
} else {
JValue jv;
jv.SetJ(args[i].j);
- mirror::Object* val = BoxPrimitive(Primitive::GetType(shorty[i + 1]), jv).Ptr();
+ ObjPtr<mirror::Object> val = BoxPrimitive(Primitive::GetType(shorty[i + 1]), jv);
if (val == nullptr) {
CHECK(soa.Self()->IsExceptionPending());
return zero;
@@ -112,7 +114,7 @@
ObjPtr<mirror::Class> result_type = interface_method->ResolveReturnType();
ObjPtr<mirror::Object> result_ref = soa.Decode<mirror::Object>(result);
JValue result_unboxed;
- if (!UnboxPrimitiveForResult(result_ref.Ptr(), result_type, &result_unboxed)) {
+ if (!UnboxPrimitiveForResult(result_ref, result_type, &result_unboxed)) {
DCHECK(soa.Self()->IsExceptionPending());
return zero;
}
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index e10a6e8..a8618bd 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -50,35 +50,36 @@
// Given the context of a calling Method, use its DexCache to resolve a type to a Class. If it
// cannot be resolved, throw an error. If it can, use it to create an instance.
template <bool kInstrumented>
-ALWAYS_INLINE inline mirror::Object* AllocObjectFromCode(mirror::Class* klass,
- Thread* self,
- gc::AllocatorType allocator_type)
+ALWAYS_INLINE inline ObjPtr<mirror::Object> AllocObjectFromCode(ObjPtr<mirror::Class> klass,
+ Thread* self,
+ gc::AllocatorType allocator_type)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
// Given the context of a calling Method and a resolved class, create an instance.
template <bool kInstrumented>
-ALWAYS_INLINE inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass,
- Thread* self,
- gc::AllocatorType allocator_type)
+ALWAYS_INLINE
+inline ObjPtr<mirror::Object> AllocObjectFromCodeResolved(ObjPtr<mirror::Class> klass,
+ Thread* self,
+ gc::AllocatorType allocator_type)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
// Given the context of a calling Method and an initialized class, create an instance.
template <bool kInstrumented>
-ALWAYS_INLINE inline mirror::Object* AllocObjectFromCodeInitialized(
- mirror::Class* klass,
- Thread* self,
- gc::AllocatorType allocator_type)
+ALWAYS_INLINE
+inline ObjPtr<mirror::Object> AllocObjectFromCodeInitialized(ObjPtr<mirror::Class> klass,
+ Thread* self,
+ gc::AllocatorType allocator_type)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
template <bool kAccessCheck>
-ALWAYS_INLINE inline mirror::Class* CheckArrayAlloc(dex::TypeIndex type_idx,
- int32_t component_count,
- ArtMethod* method,
- bool* slow_path)
+ALWAYS_INLINE inline ObjPtr<mirror::Class> CheckArrayAlloc(dex::TypeIndex type_idx,
+ int32_t component_count,
+ ArtMethod* method,
+ bool* slow_path)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
@@ -96,10 +97,11 @@
REQUIRES(!Roles::uninterruptible_);
template <bool kInstrumented>
-ALWAYS_INLINE inline mirror::Array* AllocArrayFromCodeResolved(mirror::Class* klass,
- int32_t component_count,
- Thread* self,
- gc::AllocatorType allocator_type)
+ALWAYS_INLINE
+inline ObjPtr<mirror::Array> AllocArrayFromCodeResolved(ObjPtr<mirror::Class> klass,
+ int32_t component_count,
+ Thread* self,
+ gc::AllocatorType allocator_type)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
@@ -179,8 +181,10 @@
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
-JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, const char* shorty,
- jobject rcvr_jobj, jobject interface_art_method_jobj,
+JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa,
+ const char* shorty,
+ jobject rcvr_jobj,
+ jobject interface_art_method_jobj,
std::vector<jvalue>& args)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
index abefa4a..1b3bb6a 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
@@ -60,11 +60,11 @@
}
}
if (kInitialized) {
- return AllocObjectFromCodeInitialized<kInstrumented>(klass, self, allocator_type);
+ return AllocObjectFromCodeInitialized<kInstrumented>(klass, self, allocator_type).Ptr();
} else if (!kFinalize) {
- return AllocObjectFromCodeResolved<kInstrumented>(klass, self, allocator_type);
+ return AllocObjectFromCodeResolved<kInstrumented>(klass, self, allocator_type).Ptr();
} else {
- return AllocObjectFromCode<kInstrumented>(klass, self, allocator_type);
+ return AllocObjectFromCode<kInstrumented>(klass, self, allocator_type).Ptr();
}
}
@@ -95,8 +95,8 @@
mirror::Class* klass, int32_t component_count, Thread* self) \
REQUIRES_SHARED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
- return AllocArrayFromCodeResolved<instrumented_bool>(klass, component_count, self, \
- allocator_type); \
+ return AllocArrayFromCodeResolved<instrumented_bool>( \
+ klass, component_count, self, allocator_type).Ptr(); \
} \
extern "C" mirror::String* artAllocStringFromBytesFromCode##suffix##suffix2( \
mirror::ByteArray* byte_array, int32_t high, int32_t offset, int32_t byte_count, \
diff --git a/runtime/interpreter/interpreter_intrinsics.cc b/runtime/interpreter/interpreter_intrinsics.cc
index 2127f1d..c8878e1 100644
--- a/runtime/interpreter/interpreter_intrinsics.cc
+++ b/runtime/interpreter/interpreter_intrinsics.cc
@@ -186,7 +186,7 @@
REQUIRES_SHARED(Locks::mutator_lock_) {
uint32_t arg[Instruction::kMaxVarArgRegs] = {};
inst->GetVarArgs(arg, inst_data);
- mirror::String* str = shadow_frame->GetVRegReference(arg[0])->AsString();
+ ObjPtr<mirror::String> str = shadow_frame->GetVRegReference(arg[0])->AsString();
int length = str->GetLength();
int index = shadow_frame->GetVReg(arg[1]);
uint16_t res;
@@ -210,8 +210,8 @@
REQUIRES_SHARED(Locks::mutator_lock_) {
uint32_t arg[Instruction::kMaxVarArgRegs] = {};
inst->GetVarArgs(arg, inst_data);
- mirror::String* str = shadow_frame->GetVRegReference(arg[0])->AsString();
- mirror::Object* arg1 = shadow_frame->GetVRegReference(arg[1]);
+ ObjPtr<mirror::String> str = shadow_frame->GetVRegReference(arg[0])->AsString();
+ ObjPtr<mirror::Object> arg1 = shadow_frame->GetVRegReference(arg[1]);
if (arg1 == nullptr) {
return false;
}
@@ -227,7 +227,7 @@
REQUIRES_SHARED(Locks::mutator_lock_) { \
uint32_t arg[Instruction::kMaxVarArgRegs] = {}; \
inst->GetVarArgs(arg, inst_data); \
- mirror::String* str = shadow_frame->GetVRegReference(arg[0])->AsString(); \
+ ObjPtr<mirror::String> str = shadow_frame->GetVRegReference(arg[0])->AsString(); \
int ch = shadow_frame->GetVReg(arg[1]); \
if (ch >= 0x10000) { \
/* Punt if supplementary char. */ \
@@ -251,7 +251,7 @@
REQUIRES_SHARED(Locks::mutator_lock_) { \
uint32_t arg[Instruction::kMaxVarArgRegs] = {}; \
inst->GetVarArgs(arg, inst_data); \
- mirror::String* str = shadow_frame->GetVRegReference(arg[0])->AsString(); \
+ ObjPtr<mirror::String> str = shadow_frame->GetVRegReference(arg[0])->AsString(); \
result_register->operation; \
return true; \
}
@@ -271,11 +271,11 @@
// Start, end & index already checked by caller - won't throw. Destination is uncompressed.
uint32_t arg[Instruction::kMaxVarArgRegs] = {};
inst->GetVarArgs(arg, inst_data);
- mirror::String* str = shadow_frame->GetVRegReference(arg[0])->AsString();
+ ObjPtr<mirror::String> str = shadow_frame->GetVRegReference(arg[0])->AsString();
int32_t start = shadow_frame->GetVReg(arg[1]);
int32_t end = shadow_frame->GetVReg(arg[2]);
int32_t index = shadow_frame->GetVReg(arg[4]);
- mirror::CharArray* array = shadow_frame->GetVRegReference(arg[3])->AsCharArray();
+ ObjPtr<mirror::CharArray> array = shadow_frame->GetVRegReference(arg[3])->AsCharArray();
uint16_t* dst = array->GetData() + index;
int32_t len = (end - start);
if (str->IsCompressed()) {
@@ -298,11 +298,11 @@
REQUIRES_SHARED(Locks::mutator_lock_) {
uint32_t arg[Instruction::kMaxVarArgRegs] = {};
inst->GetVarArgs(arg, inst_data);
- mirror::String* str = shadow_frame->GetVRegReference(arg[0])->AsString();
- mirror::Object* obj = shadow_frame->GetVRegReference(arg[1]);
+ ObjPtr<mirror::String> str = shadow_frame->GetVRegReference(arg[0])->AsString();
+ ObjPtr<mirror::Object> obj = shadow_frame->GetVRegReference(arg[1]);
bool res = false; // Assume not equal.
if ((obj != nullptr) && obj->IsString()) {
- mirror::String* str2 = obj->AsString();
+ ObjPtr<mirror::String> str2 = obj->AsString();
if (str->GetCount() == str2->GetCount()) {
// Length & compression status are same. Can use block compare.
void* bytes1;
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index 912c444..e8d98a4 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -412,7 +412,7 @@
return true;
}
// Must load obj from vreg following ResolveVerifyAndClinit due to moving gc.
- mirror::Object* obj = vreg_addr->AsMirrorPtr();
+ ObjPtr<mirror::Object> obj = vreg_addr->AsMirrorPtr();
if (UNLIKELY(obj != nullptr && !obj->InstanceOf(c))) {
ThrowClassCastException(c, obj->GetClass());
return true;
@@ -434,11 +434,12 @@
return false; // Caller will check for pending exception. Return value unimportant.
}
// Must load obj from vreg following ResolveVerifyAndClinit due to moving gc.
- mirror::Object* obj = vreg_addr->AsMirrorPtr();
+ ObjPtr<mirror::Object> obj = vreg_addr->AsMirrorPtr();
return (obj != nullptr) && obj->InstanceOf(c);
}
-extern "C" size_t MterpFillArrayData(mirror::Object* obj, const Instruction::ArrayDataPayload* payload)
+extern "C" size_t MterpFillArrayData(mirror::Object* obj,
+ const Instruction::ArrayDataPayload* payload)
REQUIRES_SHARED(Locks::mutator_lock_) {
return FillArrayData(obj, payload);
}
@@ -446,7 +447,7 @@
extern "C" size_t MterpNewInstance(ShadowFrame* shadow_frame, Thread* self, uint32_t inst_data)
REQUIRES_SHARED(Locks::mutator_lock_) {
const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
- mirror::Object* obj = nullptr;
+ ObjPtr<mirror::Object> obj = nullptr;
ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(inst->VRegB_21c()),
shadow_frame->GetMethod(),
self,
@@ -457,7 +458,7 @@
gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
obj = mirror::String::AllocEmptyString<true>(self, allocator_type);
} else {
- obj = AllocObjectFromCode<true>(c.Ptr(),
+ obj = AllocObjectFromCode<true>(c,
self,
Runtime::Current()->GetHeap()->GetCurrentAllocator());
}
@@ -483,13 +484,13 @@
uint32_t inst_data)
REQUIRES_SHARED(Locks::mutator_lock_) {
const Instruction* inst = Instruction::At(dex_pc_ptr);
- mirror::Object* a = shadow_frame->GetVRegReference(inst->VRegB_23x());
+ ObjPtr<mirror::Object> a = shadow_frame->GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == nullptr)) {
return false;
}
int32_t index = shadow_frame->GetVReg(inst->VRegC_23x());
- mirror::Object* val = shadow_frame->GetVRegReference(inst->VRegA_23x(inst_data));
- mirror::ObjectArray<mirror::Object>* array = a->AsObjectArray<mirror::Object>();
+ ObjPtr<mirror::Object> val = shadow_frame->GetVRegReference(inst->VRegA_23x(inst_data));
+ ObjPtr<mirror::ObjectArray<mirror::Object>> array = a->AsObjectArray<mirror::Object>();
if (array->CheckIsValidIndex(index) && array->CheckAssignable(val)) {
array->SetWithoutChecks<false>(index, val);
return true;
diff --git a/runtime/method_handles.cc b/runtime/method_handles.cc
index 7d889c0..15e3a4b 100644
--- a/runtime/method_handles.cc
+++ b/runtime/method_handles.cc
@@ -478,8 +478,8 @@
// through from a transformer.
size_t first_arg_register = operands->GetOperand(0);
ObjPtr<mirror::EmulatedStackFrame> emulated_stack_frame(
- reinterpret_cast<mirror::EmulatedStackFrame*>(
- shadow_frame.GetVRegReference(first_arg_register)));
+ ObjPtr<mirror::EmulatedStackFrame>::DownCast(MakeObjPtr(
+ shadow_frame.GetVRegReference(first_arg_register))));
if (!emulated_stack_frame->WriteToShadowFrame(self,
target_type,
first_dest_reg,
@@ -527,8 +527,8 @@
StackHandleScope<2> hs(self);
size_t first_callee_register = operands->GetOperand(0);
Handle<mirror::EmulatedStackFrame> emulated_stack_frame(
- hs.NewHandle(reinterpret_cast<mirror::EmulatedStackFrame*>(
- shadow_frame.GetVRegReference(first_callee_register))));
+ hs.NewHandle(ObjPtr<mirror::EmulatedStackFrame>::DownCast(MakeObjPtr(
+ shadow_frame.GetVRegReference(first_callee_register)))));
Handle<mirror::MethodType> emulated_stack_type(hs.NewHandle(emulated_stack_frame->GetType()));
JValue local_result;
local_result.SetJ(result->GetJ());
@@ -580,8 +580,8 @@
// through the handle directly to the callee, instead of having to
// instantiate a new stack frame based on the shadow frame.
size_t first_callee_register = operands->GetOperand(0);
- sf.Assign(reinterpret_cast<mirror::EmulatedStackFrame*>(
- shadow_frame.GetVRegReference(first_callee_register)));
+ sf.Assign(ObjPtr<mirror::EmulatedStackFrame>::DownCast(MakeObjPtr(
+ shadow_frame.GetVRegReference(first_callee_register))));
} else {
sf.Assign(mirror::EmulatedStackFrame::CreateFromShadowFrameAndArgs(self,
callsite_type,
@@ -1030,14 +1030,14 @@
}
// Get the receiver
- mirror::Object* receiver = shadow_frame.GetVRegReference(operands->GetOperand(0));
+ ObjPtr<mirror::Object> receiver = shadow_frame.GetVRegReference(operands->GetOperand(0));
if (receiver == nullptr) {
ThrowNullPointerException("Expected argument 1 to be a non-null VarHandle");
return false;
}
// Cast to VarHandle instance
- Handle<mirror::VarHandle> vh(hs.NewHandle(down_cast<mirror::VarHandle*>(receiver)));
+ Handle<mirror::VarHandle> vh(hs.NewHandle(ObjPtr<mirror::VarHandle>::DownCast(receiver)));
DCHECK(GetClassRoot<mirror::VarHandle>()->IsAssignableFrom(vh->GetClass()));
// Determine the accessor kind to dispatch