Diffstat (limited to 'runtime/entrypoints')
-rw-r--r--  runtime/entrypoints/entrypoint_asm_constants.h                    31
-rw-r--r--  runtime/entrypoints/entrypoint_utils-inl.h                       420
-rw-r--r--  runtime/entrypoints/entrypoint_utils.cc                          145
-rw-r--r--  runtime/entrypoints/entrypoint_utils.h                            26
-rw-r--r--  runtime/entrypoints/jni/jni_entrypoints.cc                         6
-rw-r--r--  runtime/entrypoints/math_entrypoints_test.cc                       4
-rw-r--r--  runtime/entrypoints/quick/callee_save_frame.h                     21
-rw-r--r--  runtime/entrypoints/quick/quick_default_externs.h                  1
-rw-r--r--  runtime/entrypoints/quick/quick_default_init_entrypoints.h         1
-rw-r--r--  runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc     19
-rw-r--r--  runtime/entrypoints/quick/quick_dexcache_entrypoints.cc            1
-rw-r--r--  runtime/entrypoints/quick/quick_entrypoints.h                       1
-rw-r--r--  runtime/entrypoints/quick/quick_entrypoints_list.h                  3
-rw-r--r--  runtime/entrypoints/quick/quick_field_entrypoints.cc             103
-rw-r--r--  runtime/entrypoints/quick/quick_jni_entrypoints.cc                48
-rw-r--r--  runtime/entrypoints/quick/quick_thread_entrypoints.cc             30
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc        624
-rw-r--r--  runtime/entrypoints/runtime_asm_entrypoints.h                     12
18 files changed, 723 insertions, 773 deletions
diff --git a/runtime/entrypoints/entrypoint_asm_constants.h b/runtime/entrypoints/entrypoint_asm_constants.h
new file mode 100644
index 0000000000..bd7ebd3c73
--- /dev/null
+++ b/runtime/entrypoints/entrypoint_asm_constants.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ENTRYPOINTS_ENTRYPOINT_ASM_CONSTANTS_H_
+#define ART_RUNTIME_ENTRYPOINTS_ENTRYPOINT_ASM_CONSTANTS_H_
+
+// Reserved area on stack for art_quick_generic_jni_trampoline:
+// 4 local state ref
+// 4 padding
+// 4096 4k scratch space, enough for 2x 256 8-byte parameters
+// 8*(32+32) max 32 GPRs and 32 FPRs on each architecture, 8 bytes each
+// + 4 padding for 16-bytes alignment
+// -----------
+// 4616
+// Round up to 5k, total 5120
+#define GENERIC_JNI_TRAMPOLINE_RESERVED_AREA 5120
+
+#endif // ART_RUNTIME_ENTRYPOINTS_ENTRYPOINT_ASM_CONSTANTS_H_
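Aside (editorial, not part of the patch): the reservation above can be cross-checked against the components it documents. In the standalone C++ sketch below, only the 5120 total and the component sizes come from the header; the constant names are invented for illustration.

#include <cstddef>

// Components of the generic JNI trampoline reserved area, as documented above.
constexpr std::size_t kLocalStateRef    = 4;              // local state ref
constexpr std::size_t kRefPadding       = 4;              // padding
constexpr std::size_t kScratchSpace     = 4096;           // 2x 256 8-byte parameters
constexpr std::size_t kRegisterArea     = 8 * (32 + 32);  // up to 32 GPRs and 32 FPRs, 8 bytes each
constexpr std::size_t kAlignmentPadding = 4;              // padding for 16-byte alignment

// The rounded-up reservation must cover everything listed.
static_assert(kLocalStateRef + kRefPadding + kScratchSpace + kRegisterArea + kAlignmentPadding
                  <= 5120,  // GENERIC_JNI_TRAMPOLINE_RESERVED_AREA
              "reserved area covers the documented components");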
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 4ee1013816..4a1dfba786 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -34,7 +34,6 @@
#include "imt_conflict_table.h"
#include "imtable-inl.h"
#include "indirect_reference_table.h"
-#include "jni/jni_internal.h"
#include "mirror/array-alloc-inl.h"
#include "mirror/class-alloc-inl.h"
#include "mirror/class-inl.h"
@@ -122,7 +121,7 @@ inline ArtMethod* GetResolvedMethod(ArtMethod* outer_method,
uint32_t method_index = code_info.GetMethodIndexOf(inline_info);
if (inline_info.GetDexPc() == static_cast<uint32_t>(-1)) {
// "charAt" special case. It is the only non-leaf method we inline across dex files.
- ArtMethod* inlined_method = jni::DecodeArtMethod(WellKnownClasses::java_lang_String_charAt);
+ ArtMethod* inlined_method = WellKnownClasses::java_lang_String_charAt;
DCHECK_EQ(inlined_method->GetDexMethodIndex(), method_index);
return inlined_method;
}
@@ -290,7 +289,6 @@ inline ObjPtr<mirror::Object> AllocObjectFromCodeInitialized(ObjPtr<mirror::Clas
}
-template <bool kAccessCheck>
ALWAYS_INLINE
inline ObjPtr<mirror::Class> CheckArrayAlloc(dex::TypeIndex type_idx,
int32_t component_count,
@@ -312,7 +310,7 @@ inline ObjPtr<mirror::Class> CheckArrayAlloc(dex::TypeIndex type_idx,
}
CHECK(klass->IsArrayClass()) << klass->PrettyClass();
}
- if (kAccessCheck) {
+ if (!method->SkipAccessChecks()) {
ObjPtr<mirror::Class> referrer = method->GetDeclaringClass();
if (UNLIKELY(!referrer->CanAccess(klass))) {
ThrowIllegalAccessErrorClass(referrer, klass);
@@ -327,7 +325,7 @@ inline ObjPtr<mirror::Class> CheckArrayAlloc(dex::TypeIndex type_idx,
// it cannot be resolved, throw an error. If it can, use it to create an array.
// When verification/compiler hasn't been able to verify access, optionally perform an access
// check.
-template <bool kAccessCheck, bool kInstrumented>
+template <bool kInstrumented>
ALWAYS_INLINE
inline ObjPtr<mirror::Array> AllocArrayFromCode(dex::TypeIndex type_idx,
int32_t component_count,
@@ -335,8 +333,7 @@ inline ObjPtr<mirror::Array> AllocArrayFromCode(dex::TypeIndex type_idx,
Thread* self,
gc::AllocatorType allocator_type) {
bool slow_path = false;
- ObjPtr<mirror::Class> klass =
- CheckArrayAlloc<kAccessCheck>(type_idx, component_count, method, &slow_path);
+ ObjPtr<mirror::Class> klass = CheckArrayAlloc(type_idx, component_count, method, &slow_path);
if (UNLIKELY(slow_path)) {
if (klass == nullptr) {
return nullptr;
@@ -376,76 +373,77 @@ inline ObjPtr<mirror::Array> AllocArrayFromCodeResolved(ObjPtr<mirror::Class> kl
allocator_type);
}
-template<FindFieldType type, bool access_check>
-inline ArtField* FindFieldFromCode(uint32_t field_idx,
- ArtMethod* referrer,
- Thread* self,
- size_t expected_size) {
- constexpr bool is_primitive = (type & FindFieldFlags::PrimitiveBit) != 0;
- constexpr bool is_set = (type & FindFieldFlags::WriteBit) != 0;
- constexpr bool is_static = (type & FindFieldFlags::StaticBit) != 0;
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-
- ArtField* resolved_field;
- if (access_check) {
- // Slow path: According to JLS 13.4.8, a linkage error may occur if a compile-time
- // qualifying type of a field and the resolved run-time qualifying type of a field differed
- // in their static-ness.
- //
- // In particular, don't assume the dex instruction already correctly knows if the
- // real field is static or not. The resolution must not be aware of this.
- ArtMethod* method = referrer->GetInterfaceMethodIfProxy(kRuntimePointerSize);
-
- StackHandleScope<2> hs(self);
- Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(method->GetDexCache()));
- Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(method->GetClassLoader()));
-
- resolved_field = class_linker->ResolveFieldJLS(field_idx,
- h_dex_cache,
- h_class_loader);
- } else {
- // Fast path: Verifier already would've called ResolveFieldJLS and we wouldn't
- // be executing here if there was a static/non-static mismatch.
- resolved_field = class_linker->ResolveField(field_idx, referrer, is_static);
+FLATTEN
+inline ArtField* ResolveFieldWithAccessChecks(Thread* self,
+ ClassLinker* class_linker,
+ uint16_t field_index,
+ ArtMethod* caller,
+ bool is_static,
+ bool is_put,
+ size_t resolve_field_type) // Resolve if not zero
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (caller->SkipAccessChecks()) {
+ return class_linker->ResolveField(field_index, caller, is_static);
}
- if (UNLIKELY(resolved_field == nullptr)) {
- DCHECK(self->IsExceptionPending()); // Throw exception and unwind.
- return nullptr; // Failure.
+ caller = caller->GetInterfaceMethodIfProxy(class_linker->GetImagePointerSize());
+
+ StackHandleScope<2> hs(self);
+ Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(caller->GetDexCache()));
+ Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(caller->GetClassLoader()));
+
+ ArtField* resolved_field = class_linker->ResolveFieldJLS(field_index,
+ h_dex_cache,
+ h_class_loader);
+ if (resolved_field == nullptr) {
+ return nullptr;
}
+
ObjPtr<mirror::Class> fields_class = resolved_field->GetDeclaringClass();
- if (access_check) {
- if (UNLIKELY(resolved_field->IsStatic() != is_static)) {
- ThrowIncompatibleClassChangeErrorField(resolved_field, is_static, referrer);
+ if (UNLIKELY(resolved_field->IsStatic() != is_static)) {
+ ThrowIncompatibleClassChangeErrorField(resolved_field, is_static, caller);
+ return nullptr;
+ }
+ ObjPtr<mirror::Class> referring_class = caller->GetDeclaringClass();
+ if (UNLIKELY(!referring_class->CheckResolvedFieldAccess(fields_class,
+ resolved_field,
+ caller->GetDexCache(),
+ field_index))) {
+ DCHECK(self->IsExceptionPending());
+ return nullptr;
+ }
+ if (UNLIKELY(is_put && !resolved_field->CanBeChangedBy(caller))) {
+ ThrowIllegalAccessErrorFinalField(caller, resolved_field);
+ return nullptr;
+ }
+
+ if (resolve_field_type != 0u) {
+ StackArtFieldHandleScope<1> rhs(self);
+ ReflectiveHandle<ArtField> field_handle(rhs.NewHandle(resolved_field));
+ if (resolved_field->ResolveType().IsNull()) {
+ DCHECK(self->IsExceptionPending());
return nullptr;
}
- ObjPtr<mirror::Class> referring_class = referrer->GetDeclaringClass();
- if (UNLIKELY(!referring_class->CheckResolvedFieldAccess(fields_class,
- resolved_field,
- referrer->GetDexCache(),
- field_idx))) {
- DCHECK(self->IsExceptionPending()); // Throw exception and unwind.
- return nullptr; // Failure.
- }
- if (UNLIKELY(is_set && !resolved_field->CanBeChangedBy(referrer))) {
- ThrowIllegalAccessErrorFinalField(referrer, resolved_field);
- return nullptr; // Failure.
- } else {
- if (UNLIKELY(resolved_field->IsPrimitiveType() != is_primitive ||
- resolved_field->FieldSize() != expected_size)) {
- self->ThrowNewExceptionF("Ljava/lang/NoSuchFieldError;",
- "Attempted read of %zd-bit %s on field '%s'",
- expected_size * (32 / sizeof(int32_t)),
- is_primitive ? "primitive" : "non-primitive",
- resolved_field->PrettyField(true).c_str());
- return nullptr; // Failure.
- }
- }
+ resolved_field = field_handle.Get();
}
- if (!is_static) {
+ return resolved_field;
+}
+
+template<FindFieldType type>
+inline ArtField* FindFieldFromCode(uint32_t field_idx,
+ ArtMethod* referrer,
+ Thread* self,
+ bool should_resolve_type = false) {
+ constexpr bool is_set = (type & FindFieldFlags::WriteBit) != 0;
+ constexpr bool is_static = (type & FindFieldFlags::StaticBit) != 0;
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ ArtField* resolved_field = ResolveFieldWithAccessChecks(
+ self, class_linker, field_idx, referrer, is_static, is_set, should_resolve_type ? 1u : 0u);
+ if (!is_static || resolved_field == nullptr) {
// instance fields must be being accessed on an initialized class
return resolved_field;
} else {
+ ObjPtr<mirror::Class> fields_class = resolved_field->GetDeclaringClass();
// If the class is initialized we're done.
if (LIKELY(fields_class->IsVisiblyInitialized())) {
return resolved_field;
@@ -463,28 +461,147 @@ inline ArtField* FindFieldFromCode(uint32_t field_idx,
}
}
+// NOLINTBEGIN(bugprone-macro-parentheses)
// Explicit template declarations of FindFieldFromCode for all field access types.
-#define EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL(_type, _access_check) \
+#define EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL(_type) \
template REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE \
-ArtField* FindFieldFromCode<_type, _access_check>(uint32_t field_idx, \
- ArtMethod* referrer, \
- Thread* self, size_t expected_size) \
-
-#define EXPLICIT_FIND_FIELD_FROM_CODE_TYPED_TEMPLATE_DECL(_type) \
- EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL(_type, false); \
- EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL(_type, true)
-
-EXPLICIT_FIND_FIELD_FROM_CODE_TYPED_TEMPLATE_DECL(InstanceObjectRead);
-EXPLICIT_FIND_FIELD_FROM_CODE_TYPED_TEMPLATE_DECL(InstanceObjectWrite);
-EXPLICIT_FIND_FIELD_FROM_CODE_TYPED_TEMPLATE_DECL(InstancePrimitiveRead);
-EXPLICIT_FIND_FIELD_FROM_CODE_TYPED_TEMPLATE_DECL(InstancePrimitiveWrite);
-EXPLICIT_FIND_FIELD_FROM_CODE_TYPED_TEMPLATE_DECL(StaticObjectRead);
-EXPLICIT_FIND_FIELD_FROM_CODE_TYPED_TEMPLATE_DECL(StaticObjectWrite);
-EXPLICIT_FIND_FIELD_FROM_CODE_TYPED_TEMPLATE_DECL(StaticPrimitiveRead);
-EXPLICIT_FIND_FIELD_FROM_CODE_TYPED_TEMPLATE_DECL(StaticPrimitiveWrite);
-
-#undef EXPLICIT_FIND_FIELD_FROM_CODE_TYPED_TEMPLATE_DECL
+ArtField* FindFieldFromCode<_type>(uint32_t field_idx, \
+ ArtMethod* referrer, \
+ Thread* self, \
+ bool should_resolve_type = false) \
+
+EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL(InstanceObjectRead);
+EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL(InstanceObjectWrite);
+EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL(InstancePrimitiveRead);
+EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL(InstancePrimitiveWrite);
+EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL(StaticObjectRead);
+EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL(StaticObjectWrite);
+EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL(StaticPrimitiveRead);
+EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL(StaticPrimitiveWrite);
+
#undef EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL
+// NOLINTEND(bugprone-macro-parentheses)
+
+static inline bool IsStringInit(const DexFile* dex_file, uint32_t method_idx)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ const dex::MethodId& method_id = dex_file->GetMethodId(method_idx);
+ const char* class_name = dex_file->StringByTypeIdx(method_id.class_idx_);
+ const char* method_name = dex_file->GetMethodName(method_id);
+ // Instead of calling ResolveMethod(), which has a suspend point and can trigger
+ // GC, look up the method symbolically.
+ // Compare method's class name and method name against string init.
+ // It's ok since it's not allowed to create your own java/lang/String.
+ // TODO: verify that assumption.
+ if ((strcmp(class_name, "Ljava/lang/String;") == 0) &&
+ (strcmp(method_name, "<init>") == 0)) {
+ return true;
+ }
+ return false;
+}
+
+static inline bool IsStringInit(const Instruction& instr, ArtMethod* caller)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (instr.Opcode() == Instruction::INVOKE_DIRECT ||
+ instr.Opcode() == Instruction::INVOKE_DIRECT_RANGE) {
+ uint16_t callee_method_idx = (instr.Opcode() == Instruction::INVOKE_DIRECT_RANGE) ?
+ instr.VRegB_3rc() : instr.VRegB_35c();
+ return IsStringInit(caller->GetDexFile(), callee_method_idx);
+ }
+ return false;
+}
+
+extern "C" size_t NterpGetMethod(Thread* self, ArtMethod* caller, const uint16_t* dex_pc_ptr);
+
+template <InvokeType type>
+ArtMethod* FindMethodToCall(Thread* self,
+ ArtMethod* caller,
+ ObjPtr<mirror::Object>* this_object,
+ const Instruction& inst,
+ bool only_lookup_tls_cache,
+ /*out*/ bool* string_init)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+
+ // Try to find the method in thread-local cache.
+ size_t tls_value = 0u;
+ if (!self->GetInterpreterCache()->Get(self, &inst, &tls_value)) {
+ if (only_lookup_tls_cache) {
+ return nullptr;
+ }
+ DCHECK(!self->IsExceptionPending());
+ // NterpGetMethod can suspend, so save this_object.
+ StackHandleScope<1> hs(self);
+ HandleWrapperObjPtr<mirror::Object> h_this(hs.NewHandleWrapper(this_object));
+ tls_value = NterpGetMethod(self, caller, reinterpret_cast<const uint16_t*>(&inst));
+ if (self->IsExceptionPending()) {
+ return nullptr;
+ }
+ }
+
+ if (type != kStatic && UNLIKELY((*this_object) == nullptr)) {
+ if (UNLIKELY(IsStringInit(inst, caller))) {
+ // Hack for String init:
+ //
+ // We assume that the input of String.<init> in verified code is always
+ // an uninitialized reference. If it is a null constant, it must have been
+ // optimized out by the compiler and we arrive here after deoptimization.
+ // Do not throw NullPointerException.
+ } else {
+ // Maintain interpreter-like semantics where NullPointerException is thrown
+ // after potential NoSuchMethodError from class linker.
+ const uint32_t method_idx = inst.VRegB();
+ ThrowNullPointerExceptionForMethodAccess(method_idx, type);
+ return nullptr;
+ }
+ }
+
+ static constexpr size_t kStringInitMethodFlag = 0b1;
+ static constexpr size_t kInvokeInterfaceOnObjectMethodFlag = 0b1;
+ static constexpr size_t kMethodMask = ~0b11;
+
+ ArtMethod* called_method = nullptr;
+ switch (type) {
+ case kDirect:
+ case kSuper:
+ case kStatic:
+ // Note: for the interpreter, the String.<init> special casing for invocation is handled
+ // in DoCallCommon.
+ *string_init = ((tls_value & kStringInitMethodFlag) != 0);
+ DCHECK_EQ(*string_init, IsStringInit(inst, caller));
+ called_method = reinterpret_cast<ArtMethod*>(tls_value & kMethodMask);
+ break;
+ case kInterface:
+ if ((tls_value & kInvokeInterfaceOnObjectMethodFlag) != 0) {
+ // invokeinterface on a j.l.Object method.
+ uint16_t method_index = tls_value >> 16;
+ called_method = (*this_object)->GetClass()->GetVTableEntry(method_index, pointer_size);
+ } else {
+ ArtMethod* interface_method = reinterpret_cast<ArtMethod*>(tls_value & kMethodMask);
+ called_method = (*this_object)->GetClass()->GetImt(pointer_size)->Get(
+ interface_method->GetImtIndex(), pointer_size);
+ if (called_method->IsRuntimeMethod()) {
+ called_method = (*this_object)->GetClass()->FindVirtualMethodForInterface(
+ interface_method, pointer_size);
+ if (UNLIKELY(called_method == nullptr)) {
+ ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(
+ interface_method, *this_object, caller);
+ return nullptr;
+ }
+ }
+ }
+ break;
+ case kVirtual:
+ called_method = (*this_object)->GetClass()->GetVTableEntry(tls_value, pointer_size);
+ break;
+ }
+
+ if (UNLIKELY(!called_method->IsInvokable())) {
+ called_method->ThrowInvocationTimeError((type == kStatic) ? nullptr : *this_object);
+ return nullptr;
+ }
+ DCHECK(!called_method->IsRuntimeMethod()) << called_method->PrettyMethod();
+ return called_method;
+}
template<bool access_check>
ALWAYS_INLINE ArtMethod* FindSuperMethodToCall(uint32_t method_idx,
@@ -546,130 +663,6 @@ ALWAYS_INLINE ArtMethod* FindSuperMethodToCall(uint32_t method_idx,
return super_class->GetVTableEntry(vtable_index, linker->GetImagePointerSize());
}
-// Follow virtual/interface indirections if applicable.
-// Will throw null-pointer exception the if the object is null.
-template<InvokeType type, bool access_check>
-ALWAYS_INLINE ArtMethod* FindMethodToCall(uint32_t method_idx,
- ArtMethod* resolved_method,
- ObjPtr<mirror::Object>* this_object,
- ArtMethod* referrer,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
- // Null pointer check.
- if (UNLIKELY(*this_object == nullptr && type != kStatic)) {
- if (UNLIKELY(resolved_method->GetDeclaringClass()->IsStringClass() &&
- resolved_method->IsConstructor())) {
- // Hack for String init:
- //
- // We assume that the input of String.<init> in verified code is always
- // an unitialized reference. If it is a null constant, it must have been
- // optimized out by the compiler. Do not throw NullPointerException.
- } else {
- // Maintain interpreter-like semantics where NullPointerException is thrown
- // after potential NoSuchMethodError from class linker.
- ThrowNullPointerExceptionForMethodAccess(method_idx, type);
- return nullptr; // Failure.
- }
- }
- switch (type) {
- case kStatic:
- case kDirect:
- return resolved_method;
- case kVirtual: {
- ObjPtr<mirror::Class> klass = (*this_object)->GetClass();
- uint16_t vtable_index = resolved_method->GetMethodIndex();
- if (access_check &&
- (!klass->HasVTable() ||
- vtable_index >= static_cast<uint32_t>(klass->GetVTableLength()))) {
- // Behavior to agree with that of the verifier.
- ThrowNoSuchMethodError(type, resolved_method->GetDeclaringClass(),
- resolved_method->GetName(), resolved_method->GetSignature());
- return nullptr; // Failure.
- }
- DCHECK(klass->HasVTable()) << klass->PrettyClass();
- return klass->GetVTableEntry(vtable_index, class_linker->GetImagePointerSize());
- }
- case kSuper: {
- return FindSuperMethodToCall<access_check>(method_idx, resolved_method, referrer, self);
- }
- case kInterface: {
- size_t imt_index = resolved_method->GetImtIndex();
- PointerSize pointer_size = class_linker->GetImagePointerSize();
- ObjPtr<mirror::Class> klass = (*this_object)->GetClass();
- ArtMethod* imt_method = klass->GetImt(pointer_size)->Get(imt_index, pointer_size);
- if (!imt_method->IsRuntimeMethod()) {
- if (kIsDebugBuild) {
- ArtMethod* method = klass->FindVirtualMethodForInterface(
- resolved_method, class_linker->GetImagePointerSize());
- CHECK_EQ(imt_method, method) << ArtMethod::PrettyMethod(resolved_method) << " / "
- << imt_method->PrettyMethod() << " / "
- << ArtMethod::PrettyMethod(method) << " / "
- << klass->PrettyClass();
- }
- return imt_method;
- } else {
- ArtMethod* interface_method = klass->FindVirtualMethodForInterface(
- resolved_method, class_linker->GetImagePointerSize());
- if (UNLIKELY(interface_method == nullptr)) {
- ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(resolved_method,
- *this_object, referrer);
- return nullptr; // Failure.
- }
- return interface_method;
- }
- }
- default:
- LOG(FATAL) << "Unknown invoke type " << type;
- return nullptr; // Failure.
- }
-}
-
-template<InvokeType type, bool access_check>
-inline ArtMethod* FindMethodFromCode(uint32_t method_idx,
- ObjPtr<mirror::Object>* this_object,
- ArtMethod* referrer,
- Thread* self) {
- ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
- constexpr ClassLinker::ResolveMode resolve_mode =
- access_check ? ClassLinker::ResolveMode::kCheckICCEAndIAE
- : ClassLinker::ResolveMode::kNoChecks;
- ArtMethod* resolved_method;
- if (type == kStatic) {
- resolved_method = class_linker->ResolveMethod<resolve_mode>(self, method_idx, referrer, type);
- } else {
- StackHandleScope<1> hs(self);
- HandleWrapperObjPtr<mirror::Object> h_this(hs.NewHandleWrapper(this_object));
- resolved_method = class_linker->ResolveMethod<resolve_mode>(self, method_idx, referrer, type);
- }
- if (UNLIKELY(resolved_method == nullptr)) {
- DCHECK(self->IsExceptionPending()); // Throw exception and unwind.
- return nullptr; // Failure.
- }
- return FindMethodToCall<type, access_check>(
- method_idx, resolved_method, this_object, referrer, self);
-}
-
-// Explicit template declarations of FindMethodFromCode for all invoke types.
-#define EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL(_type, _access_check) \
- template REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE \
- ArtMethod* FindMethodFromCode<_type, _access_check>(uint32_t method_idx, \
- ObjPtr<mirror::Object>* this_object, \
- ArtMethod* referrer, \
- Thread* self)
-#define EXPLICIT_FIND_METHOD_FROM_CODE_TYPED_TEMPLATE_DECL(_type) \
- EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL(_type, false); \
- EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL(_type, true)
-
-EXPLICIT_FIND_METHOD_FROM_CODE_TYPED_TEMPLATE_DECL(kStatic);
-EXPLICIT_FIND_METHOD_FROM_CODE_TYPED_TEMPLATE_DECL(kDirect);
-EXPLICIT_FIND_METHOD_FROM_CODE_TYPED_TEMPLATE_DECL(kVirtual);
-EXPLICIT_FIND_METHOD_FROM_CODE_TYPED_TEMPLATE_DECL(kSuper);
-EXPLICIT_FIND_METHOD_FROM_CODE_TYPED_TEMPLATE_DECL(kInterface);
-
-#undef EXPLICIT_FIND_METHOD_FROM_CODE_TYPED_TEMPLATE_DECL
-#undef EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL
-
inline ObjPtr<mirror::Class> ResolveVerifyAndClinit(dex::TypeIndex type_idx,
ArtMethod* referrer,
Thread* self,
@@ -724,13 +717,6 @@ inline INT_TYPE art_float_to_integral(FLOAT_TYPE f) {
}
}
-inline bool NeedsClinitCheckBeforeCall(ArtMethod* method) {
- // The class needs to be visibly initialized before we can use entrypoints to
- // compiled code for static methods. See b/18161648 . The class initializer is
- // special as it is invoked during initialization and does not need the check.
- return method->IsStatic() && !method->IsConstructor();
-}
-
inline ObjPtr<mirror::Object> GetGenericJniSynchronizationObject(Thread* self, ArtMethod* called)
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(!called->IsCriticalNative());
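Reading aid for the new FindMethodToCall above: the value cached in the interpreter's thread-local cache is overloaded per invoke type. The standalone sketch below decodes it the same way the hunk does; the helper names are invented, while the flag constants, mask, and 16-bit shift mirror the patch.

#include <cstdint>

constexpr std::uintptr_t kStringInitMethodFlag              = 0b1;
constexpr std::uintptr_t kInvokeInterfaceOnObjectMethodFlag = 0b1;
constexpr std::uintptr_t kMethodMask                        = ~std::uintptr_t{0b11};

// kDirect/kSuper/kStatic: the low bit marks String.<init>; the rest is the ArtMethod*.
inline void* DecodedDirectMethod(std::uintptr_t tls_value) {
  return reinterpret_cast<void*>(tls_value & kMethodMask);
}
inline bool IsStringInitEntry(std::uintptr_t tls_value) {
  return (tls_value & kStringInitMethodFlag) != 0;
}

// kInterface: the same low bit instead means "java.lang.Object method"; in that case
// the vtable index of that method is stored in the upper bits.
inline bool IsObjectMethodEntry(std::uintptr_t tls_value) {
  return (tls_value & kInvokeInterfaceOnObjectMethodFlag) != 0;
}
inline std::uint16_t ObjectMethodVTableIndex(std::uintptr_t tls_value) {
  return static_cast<std::uint16_t>(tls_value >> 16);
}

// kVirtual: the cached value is the vtable index itself, used directly with GetVTableEntry().

For the direct, super, and static cases the decoded pointer is the resolved callee; interface dispatch still goes through the IMT and falls back to FindVirtualMethodForInterface when it hits a conflict stub, exactly as in the hunk.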
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 63d2aa4351..aa27df4dc5 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -22,6 +22,7 @@
#include "base/mutex.h"
#include "base/sdk_version.h"
#include "class_linker-inl.h"
+#include "class_root-inl.h"
#include "dex/dex_file-inl.h"
#include "dex/method_reference.h"
#include "entrypoints/entrypoint_utils-inl.h"
@@ -33,14 +34,14 @@
#include "mirror/class-inl.h"
#include "mirror/method.h"
#include "mirror/object-inl.h"
-#include "mirror/object_array-inl.h"
+#include "mirror/object_array-alloc-inl.h"
#include "nth_caller_visitor.h"
#include "oat_file.h"
#include "oat_file-inl.h"
#include "oat_quick_method_header.h"
#include "reflection.h"
#include "scoped_thread_state_change-inl.h"
-#include "well_known_classes.h"
+#include "well_known_classes-inl.h"
namespace art {
@@ -65,60 +66,70 @@ JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa,
jobject rcvr_jobj,
jobject interface_method_jobj,
std::vector<jvalue>& args) {
- DCHECK(soa.Env()->IsInstanceOf(rcvr_jobj, WellKnownClasses::java_lang_reflect_Proxy));
+ StackHandleScope<4u> hs(soa.Self());
+ DCHECK(rcvr_jobj != nullptr);
+ Handle<mirror::Object> h_receiver = hs.NewHandle(soa.Decode<mirror::Object>(rcvr_jobj));
+ DCHECK(h_receiver->InstanceOf(GetClassRoot(ClassRoot::kJavaLangReflectProxy)));
+ Handle<mirror::Method> h_interface_method =
+ hs.NewHandle(soa.Decode<mirror::Method>(interface_method_jobj));
// Build argument array possibly triggering GC.
soa.Self()->AssertThreadSuspensionIsAllowable();
- jobjectArray args_jobj = nullptr;
+ auto h_args = hs.NewHandle<mirror::ObjectArray<mirror::Object>>(nullptr);
const JValue zero;
- uint32_t target_sdk_version = Runtime::Current()->GetTargetSdkVersion();
+ Runtime* runtime = Runtime::Current();
+ uint32_t target_sdk_version = runtime->GetTargetSdkVersion();
// Do not create empty arrays unless needed to maintain Dalvik bug compatibility.
if (args.size() > 0 || IsSdkVersionSetAndAtMost(target_sdk_version, SdkVersion::kL)) {
- args_jobj = soa.Env()->NewObjectArray(args.size(), WellKnownClasses::java_lang_Object, nullptr);
- if (args_jobj == nullptr) {
+ h_args.Assign(mirror::ObjectArray<mirror::Object>::Alloc(
+ soa.Self(), GetClassRoot<mirror::ObjectArray<mirror::Object>>(), args.size()));
+ if (h_args == nullptr) {
CHECK(soa.Self()->IsExceptionPending());
return zero;
}
for (size_t i = 0; i < args.size(); ++i) {
+ ObjPtr<mirror::Object> value;
if (shorty[i + 1] == 'L') {
- jobject val = args[i].l;
- soa.Env()->SetObjectArrayElement(args_jobj, i, val);
+ value = soa.Decode<mirror::Object>(args[i].l);
} else {
JValue jv;
jv.SetJ(args[i].j);
- ObjPtr<mirror::Object> val = BoxPrimitive(Primitive::GetType(shorty[i + 1]), jv);
- if (val == nullptr) {
+ value = BoxPrimitive(Primitive::GetType(shorty[i + 1]), jv);
+ if (value == nullptr) {
CHECK(soa.Self()->IsExceptionPending());
return zero;
}
- soa.Decode<mirror::ObjectArray<mirror::Object>>(args_jobj)->Set<false>(i, val);
}
+ // We do not support `Proxy.invoke()` in a transaction.
+ h_args->SetWithoutChecks</*kActiveTransaction=*/ false>(i, value);
}
}
// Call Proxy.invoke(Proxy proxy, Method method, Object[] args).
- jvalue invocation_args[3];
- invocation_args[0].l = rcvr_jobj;
- invocation_args[1].l = interface_method_jobj;
- invocation_args[2].l = args_jobj;
- jobject result =
- soa.Env()->CallStaticObjectMethodA(WellKnownClasses::java_lang_reflect_Proxy,
- WellKnownClasses::java_lang_reflect_Proxy_invoke,
- invocation_args);
+ Handle<mirror::Object> h_result = hs.NewHandle(
+ WellKnownClasses::java_lang_reflect_Proxy_invoke->InvokeStatic<'L', 'L', 'L', 'L'>(
+ soa.Self(), h_receiver.Get(), h_interface_method.Get(), h_args.Get()));
// Unbox result and handle error conditions.
if (LIKELY(!soa.Self()->IsExceptionPending())) {
- if (shorty[0] == 'V' || (shorty[0] == 'L' && result == nullptr)) {
+ if (shorty[0] == 'V' || (shorty[0] == 'L' && h_result == nullptr)) {
// Do nothing.
return zero;
} else {
- ArtMethod* interface_method =
- soa.Decode<mirror::Method>(interface_method_jobj)->GetArtMethod();
- // This can cause thread suspension.
- ObjPtr<mirror::Class> result_type = interface_method->ResolveReturnType();
- ObjPtr<mirror::Object> result_ref = soa.Decode<mirror::Object>(result);
+ ObjPtr<mirror::Class> result_type;
+ if (shorty[0] == 'L') {
+ // This can cause thread suspension.
+ result_type = h_interface_method->GetArtMethod()->ResolveReturnType();
+ if (result_type == nullptr) {
+ DCHECK(soa.Self()->IsExceptionPending());
+ return zero;
+ }
+ } else {
+ result_type = runtime->GetClassLinker()->LookupPrimitiveClass(shorty[0]);
+ DCHECK(result_type != nullptr);
+ }
JValue result_unboxed;
- if (!UnboxPrimitiveForResult(result_ref, result_type, &result_unboxed)) {
+ if (!UnboxPrimitiveForResult(h_result.Get(), result_type, &result_unboxed)) {
DCHECK(soa.Self()->IsExceptionPending());
return zero;
}
@@ -197,51 +208,62 @@ static inline std::pair<ArtMethod*, uintptr_t> DoGetCalleeSaveMethodOuterCallerA
return std::make_pair(outer_method, caller_pc);
}
-static inline ArtMethod* DoGetCalleeSaveMethodCaller(ArtMethod* outer_method,
- uintptr_t caller_pc,
- bool do_caller_check)
+static inline ArtMethod* DoGetCalleeSaveMethodCallerAndDexPc(ArtMethod** sp,
+ CalleeSaveType type,
+ ArtMethod* outer_method,
+ uintptr_t caller_pc,
+ uint32_t* dex_pc,
+ bool do_caller_check)
REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* caller = outer_method;
- if (LIKELY(caller_pc != reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()))) {
- if (outer_method != nullptr) {
- const OatQuickMethodHeader* current_code = outer_method->GetOatQuickMethodHeader(caller_pc);
- DCHECK(current_code != nullptr);
- if (current_code->IsOptimized() &&
- CodeInfo::HasInlineInfo(current_code->GetOptimizedCodeInfoPtr())) {
- uintptr_t native_pc_offset = current_code->NativeQuickPcOffset(caller_pc);
- CodeInfo code_info = CodeInfo::DecodeInlineInfoOnly(current_code);
- StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
- DCHECK(stack_map.IsValid());
- BitTableRange<InlineInfo> inline_infos = code_info.GetInlineInfosOf(stack_map);
- if (!inline_infos.empty()) {
- caller = GetResolvedMethod(outer_method, code_info, inline_infos);
- }
+ if (outer_method != nullptr) {
+ const OatQuickMethodHeader* current_code = outer_method->GetOatQuickMethodHeader(caller_pc);
+ DCHECK(current_code != nullptr);
+ if (current_code->IsOptimized() &&
+ CodeInfo::HasInlineInfo(current_code->GetOptimizedCodeInfoPtr())) {
+ uintptr_t native_pc_offset = current_code->NativeQuickPcOffset(caller_pc);
+ CodeInfo code_info = CodeInfo::DecodeInlineInfoOnly(current_code);
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
+ DCHECK(stack_map.IsValid());
+ BitTableRange<InlineInfo> inline_infos = code_info.GetInlineInfosOf(stack_map);
+ if (!inline_infos.empty()) {
+ caller = GetResolvedMethod(outer_method, code_info, inline_infos);
+ *dex_pc = inline_infos.back().GetDexPc();
+ } else {
+ *dex_pc = stack_map.GetDexPc();
}
+ } else {
+ size_t callee_frame_size = RuntimeCalleeSaveFrame::GetFrameSize(type);
+ ArtMethod** caller_sp = reinterpret_cast<ArtMethod**>(
+ reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
+ *dex_pc = current_code->ToDexPc(caller_sp, caller_pc);
}
- if (kIsDebugBuild && do_caller_check) {
- // Note that do_caller_check is optional, as this method can be called by
- // stubs, and tests without a proper call stack.
- NthCallerVisitor visitor(Thread::Current(), 1, true);
- visitor.WalkStack();
- CHECK_EQ(caller, visitor.caller);
- }
- } else {
- // We're instrumenting, just use the StackVisitor which knows how to
- // handle instrumented frames.
+ }
+ if (kIsDebugBuild && do_caller_check) {
+ // Note that do_caller_check is optional, as this method can be called by
+ // stubs, and tests without a proper call stack.
NthCallerVisitor visitor(Thread::Current(), 1, true);
visitor.WalkStack();
- caller = visitor.caller;
+ CHECK_EQ(caller, visitor.caller);
}
return caller;
}
-ArtMethod* GetCalleeSaveMethodCaller(ArtMethod** sp, CalleeSaveType type, bool do_caller_check)
+ArtMethod* GetCalleeSaveMethodCallerAndDexPc(ArtMethod** sp,
+ CalleeSaveType type,
+ uint32_t* dex_pc,
+ bool do_caller_check)
REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedAssertNoThreadSuspension ants(__FUNCTION__);
auto outer_caller_and_pc = DoGetCalleeSaveMethodOuterCallerAndPc(sp, type);
ArtMethod* outer_method = outer_caller_and_pc.first;
uintptr_t caller_pc = outer_caller_and_pc.second;
- ArtMethod* caller = DoGetCalleeSaveMethodCaller(outer_method, caller_pc, do_caller_check);
+ ArtMethod* caller = DoGetCalleeSaveMethodCallerAndDexPc(sp,
+ type,
+ outer_method,
+ caller_pc,
+ dex_pc,
+ do_caller_check);
return caller;
}
@@ -252,8 +274,13 @@ CallerAndOuterMethod GetCalleeSaveMethodCallerAndOuterMethod(Thread* self, Calle
auto outer_caller_and_pc = DoGetCalleeSaveMethodOuterCallerAndPc(sp, type);
result.outer_method = outer_caller_and_pc.first;
uintptr_t caller_pc = outer_caller_and_pc.second;
- result.caller =
- DoGetCalleeSaveMethodCaller(result.outer_method, caller_pc, /* do_caller_check= */ true);
+ uint32_t dex_pc;
+ result.caller = DoGetCalleeSaveMethodCallerAndDexPc(sp,
+ type,
+ result.outer_method,
+ caller_pc,
+ &dex_pc,
+ /* do_caller_check= */ true);
return result;
}
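A hedged usage sketch for the widened caller lookup (the entrypoint name below is invented; the call matches the GetCalleeSaveMethodCallerAndDexPc declaration added to entrypoint_utils.h further down):

extern "C" void ExampleQuickEntrypoint(Thread* self, ArtMethod** sp)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  uint32_t dex_pc = 0u;
  ArtMethod* caller = GetCalleeSaveMethodCallerAndDexPc(
      sp, CalleeSaveType::kSaveRefsAndArgs, /* out */ &dex_pc);
  // `caller` is the (possibly inlined) calling method and `dex_pc` its call site,
  // recovered from the same stack walk instead of a separate pass.
  (void)self;
  (void)caller;
  (void)dex_pc;
}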
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index 8b6fc69bea..cfa744d278 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -77,7 +77,6 @@ inline ObjPtr<mirror::Object> AllocObjectFromCodeInitialized(ObjPtr<mirror::Clas
REQUIRES(!Roles::uninterruptible_);
-template <bool kAccessCheck>
ALWAYS_INLINE inline ObjPtr<mirror::Class> CheckArrayAlloc(dex::TypeIndex type_idx,
int32_t component_count,
ArtMethod* method,
@@ -89,7 +88,7 @@ ALWAYS_INLINE inline ObjPtr<mirror::Class> CheckArrayAlloc(dex::TypeIndex type_i
// it cannot be resolved, throw an error. If it can, use it to create an array.
// When verification/compiler hasn't been able to verify access, optionally perform an access
// check.
-template <bool kAccessCheck, bool kInstrumented = true>
+template <bool kInstrumented = true>
ALWAYS_INLINE inline ObjPtr<mirror::Array> AllocArrayFromCode(dex::TypeIndex type_idx,
int32_t component_count,
ArtMethod* method,
@@ -143,11 +142,13 @@ inline ArtField* FindFieldFromCode(uint32_t field_idx,
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
-template<InvokeType type, bool access_check>
-inline ArtMethod* FindMethodFromCode(uint32_t method_idx,
- ObjPtr<mirror::Object>* this_object,
- ArtMethod* referrer,
- Thread* self)
+template<InvokeType type>
+inline ArtMethod* FindMethodToCall(Thread* self,
+ ArtMethod* referrer,
+ ObjPtr<mirror::Object>* this_object,
+ const Instruction& inst,
+ bool only_lookup_tls_cache,
+ /*out*/ bool* string_init)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
@@ -187,9 +188,10 @@ bool FillArrayData(ObjPtr<mirror::Object> obj, const Instruction::ArrayDataPaylo
template <typename INT_TYPE, typename FLOAT_TYPE>
inline INT_TYPE art_float_to_integral(FLOAT_TYPE f);
-ArtMethod* GetCalleeSaveMethodCaller(ArtMethod** sp,
- CalleeSaveType type,
- bool do_caller_check = false)
+ArtMethod* GetCalleeSaveMethodCallerAndDexPc(ArtMethod** sp,
+ CalleeSaveType type,
+ /* out */ uint32_t* dex_pc,
+ bool do_caller_check = false)
REQUIRES_SHARED(Locks::mutator_lock_);
struct CallerAndOuterMethod {
@@ -203,10 +205,6 @@ CallerAndOuterMethod GetCalleeSaveMethodCallerAndOuterMethod(Thread* self, Calle
ArtMethod* GetCalleeSaveOuterMethod(Thread* self, CalleeSaveType type)
REQUIRES_SHARED(Locks::mutator_lock_);
-// Returns whether we need to do class initialization check before invoking the method.
-// The caller is responsible for performing that check.
-bool NeedsClinitCheckBeforeCall(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
-
// Returns the synchronization object for a native method for a GenericJni frame
// we have just created or are about to exit. The synchronization object is
// the class object for static methods and the `this` object otherwise.
diff --git a/runtime/entrypoints/jni/jni_entrypoints.cc b/runtime/entrypoints/jni/jni_entrypoints.cc
index c78b604b43..e606c2173a 100644
--- a/runtime/entrypoints/jni/jni_entrypoints.cc
+++ b/runtime/entrypoints/jni/jni_entrypoints.cc
@@ -75,7 +75,7 @@ extern "C" const void* artFindNativeMethodRunnable(Thread* self)
// These calls do not have an explicit class initialization check, so do the check now.
// (When going through the stub or GenericJNI, the check was already done.)
- DCHECK(NeedsClinitCheckBeforeCall(target_method));
+ DCHECK(target_method->NeedsClinitCheckBeforeCall());
ObjPtr<mirror::Class> declaring_class = target_method->GetDeclaringClass();
if (UNLIKELY(!declaring_class->IsVisiblyInitialized())) {
StackHandleScope<1> hs(self);
@@ -87,11 +87,11 @@ extern "C" const void* artFindNativeMethodRunnable(Thread* self)
}
// Replace the runtime method on the stack with the target method.
- DCHECK(!self->GetManagedStack()->GetTopQuickFrameTag());
+ DCHECK(!self->GetManagedStack()->GetTopQuickFrameGenericJniTag());
ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrameKnownNotTagged();
DCHECK(*sp == Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs));
*sp = target_method;
- self->SetTopOfStackTagged(sp); // Fake GenericJNI frame.
+ self->SetTopOfStackGenericJniTagged(sp); // Fake GenericJNI frame.
// Continue with the target method.
method = target_method;
diff --git a/runtime/entrypoints/math_entrypoints_test.cc b/runtime/entrypoints/math_entrypoints_test.cc
index f70a2da2f5..fe61f5dd11 100644
--- a/runtime/entrypoints/math_entrypoints_test.cc
+++ b/runtime/entrypoints/math_entrypoints_test.cc
@@ -18,11 +18,11 @@
#include <limits>
-#include "common_runtime_test.h"
+#include "base/common_art_test.h"
namespace art {
-class MathEntrypointsTest : public CommonRuntimeTest {};
+class MathEntrypointsTest : public CommonArtTest {};
TEST_F(MathEntrypointsTest, DoubleToLong) {
EXPECT_EQ(std::numeric_limits<int64_t>::max(), art_d2l(1.85e19));
diff --git a/runtime/entrypoints/quick/callee_save_frame.h b/runtime/entrypoints/quick/callee_save_frame.h
index 1baccee4fc..7d9b844b0c 100644
--- a/runtime/entrypoints/quick/callee_save_frame.h
+++ b/runtime/entrypoints/quick/callee_save_frame.h
@@ -28,6 +28,7 @@
// specialize the code.
#include "arch/arm/callee_save_frame_arm.h"
#include "arch/arm64/callee_save_frame_arm64.h"
+#include "arch/riscv64/callee_save_frame_riscv64.h"
#include "arch/x86/callee_save_frame_x86.h"
#include "arch/x86_64/callee_save_frame_x86_64.h"
@@ -73,13 +74,25 @@ struct CSFSelector; // No definition for unspecialized callee save frame select
// Note: kThumb2 is never the kRuntimeISA.
template <>
-struct CSFSelector<InstructionSet::kArm> { using type = arm::ArmCalleeSaveFrame; };
+struct CSFSelector<InstructionSet::kArm> {
+ using type = arm::ArmCalleeSaveFrame;
+};
+template <>
+struct CSFSelector<InstructionSet::kArm64> {
+ using type = arm64::Arm64CalleeSaveFrame;
+};
template <>
-struct CSFSelector<InstructionSet::kArm64> { using type = arm64::Arm64CalleeSaveFrame; };
+struct CSFSelector<InstructionSet::kRiscv64> {
+ using type = riscv64::Riscv64CalleeSaveFrame;
+};
template <>
-struct CSFSelector<InstructionSet::kX86> { using type = x86::X86CalleeSaveFrame; };
+struct CSFSelector<InstructionSet::kX86> {
+ using type = x86::X86CalleeSaveFrame;
+};
template <>
-struct CSFSelector<InstructionSet::kX86_64> { using type = x86_64::X86_64CalleeSaveFrame; };
+struct CSFSelector<InstructionSet::kX86_64> {
+ using type = x86_64::X86_64CalleeSaveFrame;
+};
} // namespace detail
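The consumer of CSFSelector is outside this hunk; as an assumption about the surrounding file, it is resolved roughly like the sketch below, which is why the new riscv64 specialization is what lets riscv64 builds pick their frame layout at compile time.

// Sketch (assumed usage, not an excerpt from the patch).
using SelectedCalleeSaveFrame = detail::CSFSelector<kRuntimeISA>::type;

inline constexpr size_t ExampleFrameSize(CalleeSaveType type) {
  // Delegates to e.g. riscv64::Riscv64CalleeSaveFrame::GetFrameSize on a riscv64 build.
  return SelectedCalleeSaveFrame::GetFrameSize(type);
}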
diff --git a/runtime/entrypoints/quick/quick_default_externs.h b/runtime/entrypoints/quick/quick_default_externs.h
index f8856d82b9..cb3caac9ab 100644
--- a/runtime/entrypoints/quick/quick_default_externs.h
+++ b/runtime/entrypoints/quick/quick_default_externs.h
@@ -122,6 +122,7 @@ extern "C" void art_jni_method_start();
extern "C" void art_jni_monitored_method_start();
extern "C" void art_jni_method_end();
extern "C" void art_jni_monitored_method_end();
+extern "C" void art_jni_method_entry_hook();
// JNI lock/unlock entrypoints. Note: Custom calling convention.
extern "C" void art_jni_lock_object(art::mirror::Object*);
diff --git a/runtime/entrypoints/quick/quick_default_init_entrypoints.h b/runtime/entrypoints/quick/quick_default_init_entrypoints.h
index 939feeebcc..ea077889ee 100644
--- a/runtime/entrypoints/quick/quick_default_init_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_default_init_entrypoints.h
@@ -79,6 +79,7 @@ static void DefaultInitEntryPoints(JniEntryPoints* jpoints,
qpoints->SetQuickGenericJniTrampoline(art_quick_generic_jni_trampoline);
qpoints->SetJniDecodeReferenceResult(JniDecodeReferenceResult);
qpoints->SetJniReadBarrier(art_jni_read_barrier);
+ qpoints->SetJniMethodEntryHook(art_jni_method_entry_hook);
// Locks
if (UNLIKELY(VLOG_IS_ON(systrace_lock_logging))) {
diff --git a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
index d06dbcb12a..277bc7bf06 100644
--- a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
@@ -25,7 +25,10 @@
namespace art {
-NO_RETURN static void artDeoptimizeImpl(Thread* self, DeoptimizationKind kind, bool single_frame)
+NO_RETURN static void artDeoptimizeImpl(Thread* self,
+ DeoptimizationKind kind,
+ bool single_frame,
+ bool skip_method_exit_callbacks)
REQUIRES_SHARED(Locks::mutator_lock_) {
Runtime::Current()->IncrementDeoptimizationCount(kind);
if (VLOG_IS_ON(deopt)) {
@@ -43,13 +46,12 @@ NO_RETURN static void artDeoptimizeImpl(Thread* self, DeoptimizationKind kind, b
if (single_frame) {
exception_handler.DeoptimizeSingleFrame(kind);
} else {
- exception_handler.DeoptimizeStack();
+ exception_handler.DeoptimizeStack(skip_method_exit_callbacks);
}
- uintptr_t return_pc = exception_handler.UpdateInstrumentationStack();
if (exception_handler.IsFullFragmentDone()) {
exception_handler.DoLongJump(true);
} else {
- exception_handler.DeoptimizePartialFragmentFixup(return_pc);
+ exception_handler.DeoptimizePartialFragmentFixup();
// We cannot smash the caller-saves, as we need the ArtMethod in a parameter register that would
// be caller-saved. This has the downside that we cannot track incorrect register usage down the
// line.
@@ -57,9 +59,10 @@ NO_RETURN static void artDeoptimizeImpl(Thread* self, DeoptimizationKind kind, b
}
}
-extern "C" NO_RETURN void artDeoptimize(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
+extern "C" NO_RETURN void artDeoptimize(Thread* self, bool skip_method_exit_callbacks)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- artDeoptimizeImpl(self, DeoptimizationKind::kFullFrame, false);
+ artDeoptimizeImpl(self, DeoptimizationKind::kFullFrame, false, skip_method_exit_callbacks);
}
// This is called directly from compiled code by an HDeoptimize.
@@ -74,7 +77,9 @@ extern "C" NO_RETURN void artDeoptimizeFromCompiledCode(DeoptimizationKind kind,
self->GetException(),
/* from_code= */ true,
DeoptimizationMethodType::kDefault);
- artDeoptimizeImpl(self, kind, true);
+ // Deopting from compiled code, so the method exit callbacks haven't run yet. Don't skip
+ // them if they are required.
+ artDeoptimizeImpl(self, kind, true, /* skip_method_exit_callbacks= */ false);
}
} // namespace art
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index 60a5875c5e..76bee2152a 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -23,6 +23,7 @@
#include "dex/dex_file_types.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "gc/heap.h"
+#include "jvalue-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
diff --git a/runtime/entrypoints/quick/quick_entrypoints.h b/runtime/entrypoints/quick/quick_entrypoints.h
index 7af1a0b14e..0e73c63828 100644
--- a/runtime/entrypoints/quick/quick_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_entrypoints.h
@@ -67,6 +67,7 @@ extern "C" void artJniUnlockObject(mirror::Object* locked, Thread* self)
// JNI entrypoints when monitoring entry/exit.
extern "C" void artJniMonitoredMethodStart(Thread* self) UNLOCK_FUNCTION(Locks::mutator_lock_);
extern "C" void artJniMonitoredMethodEnd(Thread* self) SHARED_LOCK_FUNCTION(Locks::mutator_lock_);
+extern "C" void artJniMethodEntryHook(Thread* self);
// StringAppend pattern entrypoint.
extern "C" mirror::String* artStringBuilderAppend(uint32_t format,
diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h
index dffaa4bb25..aa3360e1a4 100644
--- a/runtime/entrypoints/quick/quick_entrypoints_list.h
+++ b/runtime/entrypoints/quick/quick_entrypoints_list.h
@@ -78,6 +78,7 @@
V(JniLockObject, void, mirror::Object*) \
V(JniUnlockObject, void, mirror::Object*) \
V(QuickGenericJniTrampoline, void, ArtMethod*) \
+ V(JniMethodEntryHook, void) \
\
V(LockObject, void, mirror::Object*) \
V(UnlockObject, void, mirror::Object*) \
@@ -150,6 +151,7 @@
\
V(NewEmptyString, void, void) \
V(NewStringFromBytes_B, void, void) \
+ V(NewStringFromBytes_BB, void, void) \
V(NewStringFromBytes_BI, void, void) \
V(NewStringFromBytes_BII, void, void) \
V(NewStringFromBytes_BIII, void, void) \
@@ -164,6 +166,7 @@
V(NewStringFromString, void, void) \
V(NewStringFromStringBuffer, void, void) \
V(NewStringFromStringBuilder, void, void) \
+ V(NewStringFromUtf16Bytes_BII, void, void) \
\
V(StringBuilderAppend, void*, uint32_t) \
\
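The consumer of this list is not part of the diff; as an assumption, each V(name, ret, ...) row is expanded elsewhere into a function-pointer slot of the quick entrypoint table, roughly as in this hypothetical X-macro sketch:

// Hypothetical expansion sketch, mirroring the usual X-macro pattern.
#define ENTRYPOINT_SLOT(name, rettype, ...) rettype (*p##name)(__VA_ARGS__);
// With that, the new V(JniMethodEntryHook, void) row contributes a
// `void (*pJniMethodEntryHook)();` slot, matching the art_jni_method_entry_hook
// extern and the SetJniMethodEntryHook() call added in the other files.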
diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc
index d32aa39996..e2fc232670 100644
--- a/runtime/entrypoints/quick/quick_field_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc
@@ -32,7 +32,7 @@ namespace art {
inline ArtField* FindFieldFast(uint32_t field_idx,
ArtMethod* referrer,
FindFieldType type,
- size_t expected_size)
+ bool should_resolve_type = false)
REQUIRES(!Roles::uninterruptible_)
REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedAssertNoThreadSuspension ants(__FUNCTION__);
@@ -41,7 +41,6 @@ inline ArtField* FindFieldFast(uint32_t field_idx,
return nullptr;
}
// Check for incompatible class change.
- const bool is_primitive = (type & FindFieldFlags::PrimitiveBit) != 0;
const bool is_set = (type & FindFieldFlags::WriteBit) != 0;
const bool is_static = (type & FindFieldFlags::StaticBit) != 0;
if (UNLIKELY(resolved_field->IsStatic() != is_static)) {
@@ -63,8 +62,7 @@ inline ArtField* FindFieldFast(uint32_t field_idx,
// Illegal access.
return nullptr;
}
- if (UNLIKELY(resolved_field->IsPrimitiveType() != is_primitive ||
- resolved_field->FieldSize() != expected_size)) {
+ if (should_resolve_type && resolved_field->LookupResolvedType() == nullptr) {
return nullptr;
}
return resolved_field;
@@ -73,17 +71,17 @@ inline ArtField* FindFieldFast(uint32_t field_idx,
// Helper function to do a null check after trying to resolve the field. Not for statics since obj
// does not exist there. There is a suspend check, object is a double pointer to update the value
// in the caller in case it moves.
-template<FindFieldType type, bool kAccessCheck>
+template<FindFieldType type>
ALWAYS_INLINE static inline ArtField* FindInstanceField(uint32_t field_idx,
ArtMethod* referrer,
Thread* self,
- size_t size,
- mirror::Object** obj)
+ mirror::Object** obj,
+ bool should_resolve_type = false)
REQUIRES(!Roles::uninterruptible_)
REQUIRES_SHARED(Locks::mutator_lock_) {
StackHandleScope<1> hs(self);
HandleWrapper<mirror::Object> h(hs.NewHandleWrapper(obj));
- ArtField* field = FindFieldFromCode<type, kAccessCheck>(field_idx, referrer, self, size);
+ ArtField* field = FindFieldFromCode<type>(field_idx, referrer, self, should_resolve_type);
if (LIKELY(field != nullptr) && UNLIKELY(h == nullptr)) {
ThrowNullPointerExceptionForFieldAccess(field, referrer, (type & FindFieldFlags::ReadBit) != 0);
return nullptr;
@@ -116,13 +114,12 @@ static ArtMethod* GetReferrer(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_
REQUIRES_SHARED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
ArtField* field = FindFieldFast( \
- field_idx, referrer, Static ## PrimitiveOrObject ## Read, \
- sizeof(PrimitiveType)); \
+ field_idx, referrer, Static ## PrimitiveOrObject ## Read); \
if (LIKELY(field != nullptr)) { \
return field->Get ## Kind (field->GetDeclaringClass())Ptr; /* NOLINT */ \
} \
- field = FindFieldFromCode<Static ## PrimitiveOrObject ## Read, true>( \
- field_idx, referrer, self, sizeof(PrimitiveType)); \
+ field = FindFieldFromCode<Static ## PrimitiveOrObject ## Read>( \
+ field_idx, referrer, self); \
if (LIKELY(field != nullptr)) { \
return field->Get ## Kind (field->GetDeclaringClass())Ptr; /* NOLINT */ \
} \
@@ -137,13 +134,12 @@ static ArtMethod* GetReferrer(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_
REQUIRES_SHARED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
ArtField* field = FindFieldFast( \
- field_idx, referrer, Instance ## PrimitiveOrObject ## Read, \
- sizeof(PrimitiveType)); \
+ field_idx, referrer, Instance ## PrimitiveOrObject ## Read); \
if (LIKELY(field != nullptr) && obj != nullptr) { \
return field->Get ## Kind (obj)Ptr; /* NOLINT */ \
} \
- field = FindInstanceField<Instance ## PrimitiveOrObject ## Read, true>( \
- field_idx, referrer, self, sizeof(PrimitiveType), &obj); \
+ field = FindInstanceField<Instance ## PrimitiveOrObject ## Read>( \
+ field_idx, referrer, self, &obj); \
if (LIKELY(field != nullptr)) { \
return field->Get ## Kind (obj)Ptr; /* NOLINT */ \
} \
@@ -157,33 +153,30 @@ static ArtMethod* GetReferrer(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_
Thread* self) \
REQUIRES_SHARED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
+ bool should_resolve_type = (IsObject) && new_value != 0; \
ArtField* field = FindFieldFast( \
- field_idx, referrer, Static ## PrimitiveOrObject ## Write, \
- sizeof(PrimitiveType)); \
+ field_idx, \
+ referrer, \
+ Static ## PrimitiveOrObject ## Write, \
+ should_resolve_type); \
if (UNLIKELY(field == nullptr)) { \
if (IsObject) { \
StackHandleScope<1> hs(self); \
HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper( \
reinterpret_cast<mirror::Object**>(&new_value))); \
- field = FindFieldFromCode<Static ## PrimitiveOrObject ## Write, true>( \
- field_idx, referrer, self, sizeof(PrimitiveType)); \
+ field = FindFieldFromCode<Static ## PrimitiveOrObject ## Write>( \
+ field_idx, \
+ referrer, \
+ self, \
+ should_resolve_type); \
} else { \
- field = FindFieldFromCode<Static ## PrimitiveOrObject ## Write, true>( \
- field_idx, referrer, self, sizeof(PrimitiveType)); \
+ field = FindFieldFromCode<Static ## PrimitiveOrObject ## Write>( \
+ field_idx, referrer, self); \
} \
if (UNLIKELY(field == nullptr)) { \
return -1; \
} \
} \
- if (!referrer->SkipAccessChecks() && IsObject && new_value != 0) { \
- StackArtFieldHandleScope<1> rhs(self); \
- ReflectiveHandle<ArtField> field_handle(rhs.NewHandle(field)); \
- if (field->ResolveType().IsNull()) { \
- self->AssertPendingException(); \
- return -1; \
- } \
- field = field_handle.Get(); \
- } \
field->Set ## Kind <false>(field->GetDeclaringClass(), new_value); \
return 0; \
} \
@@ -195,43 +188,31 @@ static ArtMethod* GetReferrer(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_
Thread* self) \
REQUIRES_SHARED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
+ bool should_resolve_type = (IsObject) && new_value != 0; \
ArtField* field = FindFieldFast( \
- field_idx, referrer, Instance ## PrimitiveOrObject ## Write, \
- sizeof(PrimitiveType)); \
+ field_idx, \
+ referrer, \
+ Instance ## PrimitiveOrObject ## Write, \
+ should_resolve_type); \
if (UNLIKELY(field == nullptr || obj == nullptr)) { \
if (IsObject) { \
StackHandleScope<1> hs(self); \
HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper( \
reinterpret_cast<mirror::Object**>(&new_value))); \
- field = \
- FindInstanceField<Instance ## PrimitiveOrObject ## Write, true>( \
- field_idx, \
- referrer, \
- self, \
- sizeof(PrimitiveType), \
- &obj); \
+ field = FindInstanceField<Instance ## PrimitiveOrObject ## Write>( \
+ field_idx, \
+ referrer, \
+ self, \
+ &obj, \
+ should_resolve_type); \
} else { \
- field = \
- FindInstanceField<Instance ## PrimitiveOrObject ## Write, true>( \
- field_idx, \
- referrer, \
- self, \
- sizeof(PrimitiveType), \
- &obj); \
+ field = FindInstanceField<Instance ## PrimitiveOrObject ## Write>( \
+ field_idx, referrer, self, &obj); \
} \
if (UNLIKELY(field == nullptr)) { \
return -1; \
} \
} \
- if (!referrer->SkipAccessChecks() && IsObject && new_value != 0) { \
- StackArtFieldHandleScope<1> rhs(self); \
- ReflectiveHandle<ArtField> field_handle(rhs.NewHandle(field)); \
- if (field->ResolveType().IsNull()) { \
- self->AssertPendingException(); \
- return -1; \
- } \
- field = field_handle.Get(); \
- } \
field->Set ## Kind<false>(obj, new_value); \
return 0; \
} \
@@ -435,7 +416,7 @@ extern "C" int artSet16InstanceFromCode(uint32_t field_idx,
}
extern "C" mirror::Object* artReadBarrierMark(mirror::Object* obj) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
return ReadBarrier::Mark(obj);
}
@@ -443,14 +424,12 @@ extern "C" mirror::Object* artReadBarrierSlow(mirror::Object* ref ATTRIBUTE_UNUS
mirror::Object* obj,
uint32_t offset) {
// Used only in connection with non-volatile loads.
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(obj) + offset;
mirror::HeapReference<mirror::Object>* ref_addr =
reinterpret_cast<mirror::HeapReference<mirror::Object>*>(raw_addr);
- constexpr ReadBarrierOption kReadBarrierOption =
- kUseReadBarrier ? kWithReadBarrier : kWithoutReadBarrier;
mirror::Object* result =
- ReadBarrier::Barrier<mirror::Object, /* kIsVolatile= */ false, kReadBarrierOption>(
+ ReadBarrier::Barrier<mirror::Object, /* kIsVolatile= */ false, kWithReadBarrier>(
obj,
MemberOffset(offset),
ref_addr);
@@ -458,7 +437,7 @@ extern "C" mirror::Object* artReadBarrierSlow(mirror::Object* ref ATTRIBUTE_UNUS
}
extern "C" mirror::Object* artReadBarrierForRootSlow(GcRoot<mirror::Object>* root) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
return root->Read();
}
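One rule in the rewritten setter macros above is easy to miss among the line continuations; a de-macroed sketch (names invented):

// A reference-typed field's declared type only needs to be resolved when a
// non-null reference is actually being stored; null stores and primitive
// stores never look at it.
static inline bool ShouldResolveTypeForSet(bool is_object_field, uintptr_t new_value) {
  return is_object_field && new_value != 0u;
}

This is what replaces the deleted post-lookup ResolveType() blocks: when the flag is passed, the resolution now happens inside the field lookup itself (FindFieldFast / FindFieldFromCode).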
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index ab13bd95b1..6f690018ac 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -38,11 +38,16 @@
namespace art {
-static_assert(sizeof(IRTSegmentState) == sizeof(uint32_t), "IRTSegmentState size unexpected");
-static_assert(std::is_trivial<IRTSegmentState>::value, "IRTSegmentState not trivial");
+extern "C" int artMethodExitHook(Thread* self,
+ ArtMethod* method,
+ uint64_t* gpr_result,
+ uint64_t* fpr_result);
+
+static_assert(sizeof(jni::LRTSegmentState) == sizeof(uint32_t), "LRTSegmentState size unexpected");
+static_assert(std::is_trivial<jni::LRTSegmentState>::value, "LRTSegmentState not trivial");
extern "C" void artJniReadBarrier(ArtMethod* method) {
- DCHECK(kUseReadBarrier);
+ DCHECK(gUseReadBarrier);
mirror::CompressedReference<mirror::Object>* declaring_class =
method->GetDeclaringClassAddressWithoutBarrier();
if (kUseBakerReadBarrier) {
@@ -77,7 +82,7 @@ static void PopLocalReferences(uint32_t saved_local_ref_cookie, Thread* self)
env->CheckNoHeldMonitors();
}
env->SetLocalSegmentState(env->GetLocalRefCookie());
- env->SetLocalRefCookie(bit_cast<IRTSegmentState>(saved_local_ref_cookie));
+ env->SetLocalRefCookie(bit_cast<jni::LRTSegmentState>(saved_local_ref_cookie));
}
// TODO: annotalysis disabled as monitor semantics are maintained in Java code.
@@ -174,11 +179,11 @@ extern uint64_t GenericJniMethodEnd(Thread* self,
artJniUnlockObject(lock.Ptr(), self);
}
char return_shorty_char = called->GetShorty()[0];
+ uint64_t ret;
if (return_shorty_char == 'L') {
- uint64_t ret = reinterpret_cast<uint64_t>(
+ ret = reinterpret_cast<uint64_t>(
UNLIKELY(self->IsExceptionPending()) ? nullptr : JniDecodeReferenceResult(result.l, self));
PopLocalReferences(saved_local_ref_cookie, self);
- return ret;
} else {
if (LIKELY(!critical_native)) {
PopLocalReferences(saved_local_ref_cookie, self);
@@ -188,32 +193,43 @@ extern uint64_t GenericJniMethodEnd(Thread* self,
if (kRuntimeISA == InstructionSet::kX86) {
// Convert back the result to float.
double d = bit_cast<double, uint64_t>(result_f);
- return bit_cast<uint32_t, float>(static_cast<float>(d));
+ ret = bit_cast<uint32_t, float>(static_cast<float>(d));
} else {
- return result_f;
+ ret = result_f;
}
}
+ break;
case 'D':
- return result_f;
+ ret = result_f;
+ break;
case 'Z':
- return result.z;
+ ret = result.z;
+ break;
case 'B':
- return result.b;
+ ret = result.b;
+ break;
case 'C':
- return result.c;
+ ret = result.c;
+ break;
case 'S':
- return result.s;
+ ret = result.s;
+ break;
case 'I':
- return result.i;
+ ret = result.i;
+ break;
case 'J':
- return result.j;
+ ret = result.j;
+ break;
case 'V':
- return 0;
+ ret = 0;
+ break;
default:
LOG(FATAL) << "Unexpected return shorty character " << return_shorty_char;
UNREACHABLE();
}
}
+
+ return ret;
}
extern "C" void artJniMonitoredMethodStart(Thread* self) {
diff --git a/runtime/entrypoints/quick/quick_thread_entrypoints.cc b/runtime/entrypoints/quick/quick_thread_entrypoints.cc
index 93422cf056..5dca58ab04 100644
--- a/runtime/entrypoints/quick/quick_thread_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_thread_entrypoints.cc
@@ -21,16 +21,46 @@
namespace art {
+extern "C" void artDeoptimizeIfNeeded(Thread* self, uintptr_t result, bool is_ref)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
+ DCHECK(!self->IsExceptionPending());
+
+ ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
+ DCHECK(sp != nullptr && (*sp)->IsRuntimeMethod());
+
+ DeoptimizationMethodType type = instr->GetDeoptimizationMethodType(*sp);
+ JValue jvalue;
+ jvalue.SetJ(result);
+ instr->DeoptimizeIfNeeded(self, sp, type, jvalue, is_ref);
+}
+
extern "C" void artTestSuspendFromCode(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
// Called when there is a pending checkpoint or suspend request.
ScopedQuickEntrypointChecks sqec(self);
self->CheckSuspend();
+
+ // There could be other dex instructions at the same dex pc as the suspend check, and we need
+ // to execute them, so we should resume execution from the current dex pc.
+ ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
+ JValue result;
+ result.SetJ(0);
+ Runtime::Current()->GetInstrumentation()->DeoptimizeIfNeeded(
+ self, sp, DeoptimizationMethodType::kKeepDexPc, result, /* is_ref= */ false);
}
extern "C" void artImplicitSuspendFromCode(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
// Called when there is a pending checkpoint or suspend request.
ScopedQuickEntrypointChecks sqec(self);
self->CheckSuspend(/*implicit=*/ true);
+
+ // There could be other dex instructions at the same dex pc as the suspend check, and we need
+ // to execute them, so we should resume execution from the current dex pc.
+ ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
+ JValue result;
+ result.SetJ(0);
+ Runtime::Current()->GetInstrumentation()->DeoptimizeIfNeeded(
+ self, sp, DeoptimizationMethodType::kKeepDexPc, result, /* is_ref= */ false);
}
extern "C" void artCompileOptimized(ArtMethod* method, Thread* self)
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index b6ece4a86e..7e96f2947c 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -61,7 +61,7 @@
namespace art {
extern "C" NO_RETURN void artDeoptimizeFromCompiledCode(DeoptimizationKind kind, Thread* self);
-extern "C" NO_RETURN void artDeoptimize(Thread* self);
+extern "C" NO_RETURN void artDeoptimize(Thread* self, bool skip_method_exit_callbacks);
// Visits the arguments as saved to the stack by a CalleeSaveType::kRefAndArgs callee save frame.
class QuickArgumentVisitor {
@@ -141,6 +141,51 @@ class QuickArgumentVisitor {
static size_t GprIndexToGprOffset(uint32_t gpr_index) {
return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
}
+#elif defined(__riscv)
+ // The callee save frame is pointed to by SP.
+ // | argN | |
+ // | ... | |
+ // | reg. arg spills | | Caller's frame
+ // | Method* | ---
+ // | RA |
+ // | S11/X27 | callee-saved 11
+ // | S10/X26 | callee-saved 10
+ // | S9/X25 | callee-saved 9
+ // | S8/X24 | callee-saved 8
+ // | S7/X23 | callee-saved 7
+ // | S6/X22 | callee-saved 6
+ // | S5/X21 | callee-saved 5
+ // | S4/X20 | callee-saved 4
+ // | S3/X19 | callee-saved 3
+ // | S2/X18 | callee-saved 2
+ // | A7/X17 | arg 7
+ // | A6/X16 | arg 6
+ // | A5/X15 | arg 5
+ // | A4/X14 | arg 4
+ // | A3/X13 | arg 3
+ // | A2/X12 | arg 2
+ // | A1/X11 | arg 1 (A0 is the method => skipped)
+ // | S0/X8/FP | callee-saved 0 (S1 is TR => skipped)
+ // | FA7 | float arg 8
+ // | FA6 | float arg 7
+ // | FA5 | float arg 6
+ // | FA4 | float arg 5
+ // | FA3 | float arg 4
+ // | FA2 | float arg 3
+ // | FA1 | float arg 2
+ // | FA0 | float arg 1
+ // | A0/Method* | <- sp
+ static constexpr bool kSplitPairAcrossRegisterAndStack = false;
+ static constexpr bool kAlignPairRegister = false;
+ static constexpr bool kQuickSoftFloatAbi = false;
+ static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
+ static constexpr bool kQuickSkipOddFpRegisters = false;
+ static constexpr size_t kNumQuickGprArgs = 7;
+ static constexpr size_t kNumQuickFprArgs = 8;
+ static constexpr bool kGprFprLockstep = false;
+ static size_t GprIndexToGprOffset(uint32_t gpr_index) {
+ return (gpr_index + 1) * GetBytesPerGprSpillLocation(kRuntimeISA); // skip S0/X8/FP
+ }
#elif defined(__i386__)
// The callee save frame is pointed to by SP.
// | argN | |
@@ -224,13 +269,8 @@ class QuickArgumentVisitor {
#endif
public:
- // Special handling for proxy methods. Proxy methods are instance methods so the
- // 'this' object is the 1st argument. They also have the same frame layout as the
- // kRefAndArgs runtime method. Since 'this' is a reference, it is located in the
- // 1st GPR.
- static StackReference<mirror::Object>* GetProxyThisObjectReference(ArtMethod** sp)
+ static StackReference<mirror::Object>* GetThisObjectReference(ArtMethod** sp)
REQUIRES_SHARED(Locks::mutator_lock_) {
- CHECK((*sp)->IsProxyMethod());
CHECK_GT(kNumQuickGprArgs, 0u);
constexpr uint32_t kThisGprIndex = 0u; // 'this' is in the 1st GPR.
size_t this_arg_offset = kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset +
@@ -239,9 +279,15 @@ class QuickArgumentVisitor {
return reinterpret_cast<StackReference<mirror::Object>*>(this_arg_address);
}
- static ArtMethod* GetCallingMethod(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
+ static ArtMethod* GetCallingMethodAndDexPc(ArtMethod** sp, uint32_t* dex_pc)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK((*sp)->IsCalleeSaveMethod());
- return GetCalleeSaveMethodCaller(sp, CalleeSaveType::kSaveRefsAndArgs);
+ return GetCalleeSaveMethodCallerAndDexPc(sp, CalleeSaveType::kSaveRefsAndArgs, dex_pc);
+ }
+
+ static ArtMethod* GetCallingMethod(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
+ uint32_t dex_pc;
+ return GetCallingMethodAndDexPc(sp, &dex_pc);
}
static ArtMethod* GetOuterMethod(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -251,31 +297,6 @@ class QuickArgumentVisitor {
return *reinterpret_cast<ArtMethod**>(previous_sp);
}
- static uint32_t GetCallingDexPc(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
- DCHECK((*sp)->IsCalleeSaveMethod());
- constexpr size_t callee_frame_size =
- RuntimeCalleeSaveFrame::GetFrameSize(CalleeSaveType::kSaveRefsAndArgs);
- ArtMethod** caller_sp = reinterpret_cast<ArtMethod**>(
- reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
- uintptr_t outer_pc = QuickArgumentVisitor::GetCallingPc(sp);
- const OatQuickMethodHeader* current_code = (*caller_sp)->GetOatQuickMethodHeader(outer_pc);
- uintptr_t outer_pc_offset = current_code->NativeQuickPcOffset(outer_pc);
-
- if (current_code->IsOptimized()) {
- CodeInfo code_info = CodeInfo::DecodeInlineInfoOnly(current_code);
- StackMap stack_map = code_info.GetStackMapForNativePcOffset(outer_pc_offset);
- DCHECK(stack_map.IsValid());
- BitTableRange<InlineInfo> inline_infos = code_info.GetInlineInfosOf(stack_map);
- if (!inline_infos.empty()) {
- return inline_infos.back().GetDexPc();
- } else {
- return stack_map.GetDexPc();
- }
- } else {
- return current_code->ToDexPc(caller_sp, outer_pc);
- }
- }
-
static uint8_t* GetCallingPcAddr(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK((*sp)->IsCalleeSaveMethod());
uint8_t* return_adress_spill =
@@ -529,7 +550,8 @@ class QuickArgumentVisitor {
// allows to use the QuickArgumentVisitor constants without moving all the code in its own module.
extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp)
REQUIRES_SHARED(Locks::mutator_lock_) {
- return QuickArgumentVisitor::GetProxyThisObjectReference(sp)->AsMirrorPtr();
+ DCHECK((*sp)->IsProxyMethod());
+ return QuickArgumentVisitor::GetThisObjectReference(sp)->AsMirrorPtr();
}
// Visits arguments on the stack placing them into the shadow frame.
@@ -647,6 +669,7 @@ static void HandleDeoptimization(JValue* result,
method_type);
}
+NO_STACK_PROTECTOR
extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self, ArtMethod** sp)
REQUIRES_SHARED(Locks::mutator_lock_) {
// Ensure we don't get thread suspension until the object arguments are safely in the shadow
@@ -654,100 +677,75 @@ extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self,
ScopedQuickEntrypointChecks sqec(self);
if (UNLIKELY(!method->IsInvokable())) {
- method->ThrowInvocationTimeError();
+ method->ThrowInvocationTimeError(
+ method->IsStatic()
+ ? nullptr
+ : QuickArgumentVisitor::GetThisObjectReference(sp)->AsMirrorPtr());
return 0;
}
- JValue tmp_value;
- ShadowFrame* deopt_frame = self->PopStackedShadowFrame(
- StackedShadowFrameType::kDeoptimizationShadowFrame, false);
- ManagedStack fragment;
-
DCHECK(!method->IsNative()) << method->PrettyMethod();
- uint32_t shorty_len = 0;
+
+ JValue result;
+
ArtMethod* non_proxy_method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
DCHECK(non_proxy_method->GetCodeItem() != nullptr) << method->PrettyMethod();
- CodeItemDataAccessor accessor(non_proxy_method->DexInstructionData());
+ uint32_t shorty_len = 0;
const char* shorty = non_proxy_method->GetShorty(&shorty_len);
- JValue result;
- bool force_frame_pop = false;
-
+ ManagedStack fragment;
+ ShadowFrame* deopt_frame = self->MaybePopDeoptimizedStackedShadowFrame();
if (UNLIKELY(deopt_frame != nullptr)) {
HandleDeoptimization(&result, method, deopt_frame, &fragment);
} else {
+ CodeItemDataAccessor accessor(non_proxy_method->DexInstructionData());
const char* old_cause = self->StartAssertNoThreadSuspension(
"Building interpreter shadow frame");
uint16_t num_regs = accessor.RegistersSize();
// No last shadow coming from quick.
ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
- CREATE_SHADOW_FRAME(num_regs, /* link= */ nullptr, method, /* dex_pc= */ 0);
+ CREATE_SHADOW_FRAME(num_regs, method, /* dex_pc= */ 0);
ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
size_t first_arg_reg = accessor.RegistersSize() - accessor.InsSize();
BuildQuickShadowFrameVisitor shadow_frame_builder(sp, method->IsStatic(), shorty, shorty_len,
shadow_frame, first_arg_reg);
shadow_frame_builder.VisitArguments();
- // Push a transition back into managed code onto the linked list in thread.
- self->PushManagedStackFragment(&fragment);
- self->PushShadowFrame(shadow_frame);
self->EndAssertNoThreadSuspension(old_cause);
- if (NeedsClinitCheckBeforeCall(method)) {
- ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass();
- if (UNLIKELY(!declaring_class->IsVisiblyInitialized())) {
- // Ensure static method's class is initialized.
- StackHandleScope<1> hs(self);
- Handle<mirror::Class> h_class(hs.NewHandle(declaring_class));
- if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
- DCHECK(Thread::Current()->IsExceptionPending()) << method->PrettyMethod();
- self->PopManagedStackFragment(fragment);
- return 0;
- }
- }
+ // Potentially run <clinit> before pushing the shadow frame. We do not want
+ // to have the called method on the stack if there is an exception.
+ if (!EnsureInitialized(self, shadow_frame)) {
+ DCHECK(self->IsExceptionPending());
+ return 0;
}
+ // Push a transition back into managed code onto the linked list in thread.
+ self->PushManagedStackFragment(&fragment);
+ self->PushShadowFrame(shadow_frame);
result = interpreter::EnterInterpreterFromEntryPoint(self, accessor, shadow_frame);
- force_frame_pop = shadow_frame->GetForcePopFrame();
}
// Pop transition.
self->PopManagedStackFragment(fragment);
- // Request a stack deoptimization if needed
- ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp);
- uintptr_t caller_pc = QuickArgumentVisitor::GetCallingPc(sp);
- // If caller_pc is the instrumentation exit stub, the stub will check to see if deoptimization
- // should be done and it knows the real return pc. NB If the upcall is null we don't need to do
- // anything. This can happen during shutdown or early startup.
- if (UNLIKELY(
- caller != nullptr &&
- caller_pc != reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) &&
- (self->IsForceInterpreter() || Dbg::IsForcedInterpreterNeededForUpcall(self, caller)))) {
- if (!Runtime::Current()->IsAsyncDeoptimizeable(caller_pc)) {
- LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method "
- << caller->PrettyMethod();
- } else {
- VLOG(deopt) << "Forcing deoptimization on return from method " << method->PrettyMethod()
- << " to " << caller->PrettyMethod()
- << (force_frame_pop ? " for frame-pop" : "");
- DCHECK_IMPLIES(force_frame_pop, result.GetJ() == 0)
- << "Force frame pop should have no result.";
- if (force_frame_pop && self->GetException() != nullptr) {
- LOG(WARNING) << "Suppressing exception for instruction-retry: "
- << self->GetException()->Dump();
- }
- // Push the context of the deoptimization stack so we can restore the return value and the
- // exception before executing the deoptimized frames.
- self->PushDeoptimizationContext(
- result,
- shorty[0] == 'L' || shorty[0] == '[', /* class or array */
- force_frame_pop ? nullptr : self->GetException(),
- /* from_code= */ false,
- DeoptimizationMethodType::kDefault);
-
- // Set special exception to cause deoptimization.
- self->SetException(Thread::GetDeoptimizationException());
- }
+ // Check if caller needs to be deoptimized for instrumentation reasons.
+ instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
+ if (UNLIKELY(instr->ShouldDeoptimizeCaller(self, sp))) {
+ ArtMethod* caller = QuickArgumentVisitor::GetOuterMethod(sp);
+ uintptr_t caller_pc = QuickArgumentVisitor::GetCallingPc(sp);
+ DCHECK(Runtime::Current()->IsAsyncDeoptimizeable(caller, caller_pc));
+ DCHECK(caller != nullptr);
+ DCHECK(self->GetException() != Thread::GetDeoptimizationException());
+ // Push the context of the deoptimization stack so we can restore the return value and the
+ // exception before executing the deoptimized frames.
+ self->PushDeoptimizationContext(result,
+ shorty[0] == 'L' || shorty[0] == '[', /* class or array */
+ self->GetException(),
+ /* from_code= */ false,
+ DeoptimizationMethodType::kDefault);
+
+ // Set special exception to cause deoptimization.
+ self->SetException(Thread::GetDeoptimizationException());
}
// No need to restore the args since the method has already been run by the interpreter.
@@ -862,7 +860,6 @@ extern "C" uint64_t artQuickProxyInvokeHandler(
instr->MethodEnterEvent(soa.Self(), proxy_method);
if (soa.Self()->IsExceptionPending()) {
instr->MethodUnwindEvent(self,
- soa.Decode<mirror::Object>(rcvr_jobj),
proxy_method,
0);
return 0;
@@ -872,7 +869,6 @@ extern "C" uint64_t artQuickProxyInvokeHandler(
if (soa.Self()->IsExceptionPending()) {
if (instr->HasMethodUnwindListeners()) {
instr->MethodUnwindEvent(self,
- soa.Decode<mirror::Object>(rcvr_jobj),
proxy_method,
0);
}
@@ -1023,99 +1019,10 @@ void RememberForGcArgumentVisitor::FixupReferences() {
}
}
-extern "C" const void* artInstrumentationMethodEntryFromCode(ArtMethod* method,
- mirror::Object* this_object,
- Thread* self,
- ArtMethod** sp)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- const void* result;
- // Instrumentation changes the stack. Thus, when exiting, the stack cannot be verified, so skip
- // that part.
- ScopedQuickEntrypointChecks sqec(self, kIsDebugBuild, false);
- instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
- DCHECK(!method->IsProxyMethod())
- << "Proxy method " << method->PrettyMethod()
- << " (declaring class: " << method->GetDeclaringClass()->PrettyClass() << ")"
- << " should not hit instrumentation entrypoint.";
- DCHECK(!instrumentation->IsDeoptimized(method));
- // This will get the entry point either from the oat file, the JIT or the appropriate bridge
- // method if none of those can be found.
- result = instrumentation->GetCodeForInvoke(method);
- DCHECK_NE(result, GetQuickInstrumentationEntryPoint()) << method->PrettyMethod();
- bool interpreter_entry = Runtime::Current()->GetClassLinker()->IsQuickToInterpreterBridge(result);
- bool is_static = method->IsStatic();
- uint32_t shorty_len;
- const char* shorty =
- method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetShorty(&shorty_len);
-
- ScopedObjectAccessUnchecked soa(self);
- RememberForGcArgumentVisitor visitor(sp, is_static, shorty, shorty_len, &soa);
- visitor.VisitArguments();
-
- StackHandleScope<2> hs(self);
- Handle<mirror::Object> h_object(hs.NewHandle(is_static ? nullptr : this_object));
- Handle<mirror::Class> h_class(hs.NewHandle(method->GetDeclaringClass()));
-
- // Ensure that the called method's class is initialized.
- if (NeedsClinitCheckBeforeCall(method) && !h_class->IsVisiblyInitialized()) {
- if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
- visitor.FixupReferences();
- DCHECK(self->IsExceptionPending());
- return nullptr;
- }
- }
-
- instrumentation->PushInstrumentationStackFrame(self,
- is_static ? nullptr : h_object.Get(),
- method,
- reinterpret_cast<uintptr_t>(
- QuickArgumentVisitor::GetCallingPcAddr(sp)),
- QuickArgumentVisitor::GetCallingPc(sp),
- interpreter_entry);
-
- visitor.FixupReferences();
- if (UNLIKELY(self->IsExceptionPending())) {
- return nullptr;
- }
- CHECK(result != nullptr) << method->PrettyMethod();
- return result;
-}
-
-extern "C" TwoWordReturn artInstrumentationMethodExitFromCode(Thread* self,
- ArtMethod** sp,
- uint64_t* gpr_result,
- uint64_t* fpr_result)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- DCHECK_EQ(reinterpret_cast<uintptr_t>(self), reinterpret_cast<uintptr_t>(Thread::Current()));
- CHECK(gpr_result != nullptr);
- CHECK(fpr_result != nullptr);
- // Instrumentation exit stub must not be entered with a pending exception.
- CHECK(!self->IsExceptionPending()) << "Enter instrumentation exit stub with pending exception "
- << self->GetException()->Dump();
- // Compute address of return PC and check that it currently holds 0.
- constexpr size_t return_pc_offset =
- RuntimeCalleeSaveFrame::GetReturnPcOffset(CalleeSaveType::kSaveEverything);
- uintptr_t* return_pc_addr = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(sp) +
- return_pc_offset);
- CHECK_EQ(*return_pc_addr, 0U);
-
- // Pop the frame filling in the return pc. The low half of the return value is 0 when
- // deoptimization shouldn't be performed with the high-half having the return address. When
- // deoptimization should be performed the low half is zero and the high-half the address of the
- // deoptimization entry point.
- instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
- TwoWordReturn return_or_deoptimize_pc = instrumentation->PopInstrumentationStackFrame(
- self, return_pc_addr, gpr_result, fpr_result);
- if (self->IsExceptionPending() || self->ObserveAsyncException()) {
- return GetTwoWordFailureValue();
- }
- return return_or_deoptimize_pc;
-}
-
static std::string DumpInstruction(ArtMethod* method, uint32_t dex_pc)
REQUIRES_SHARED(Locks::mutator_lock_) {
if (dex_pc == static_cast<uint32_t>(-1)) {
- CHECK(method == jni::DecodeArtMethod(WellKnownClasses::java_lang_String_charAt));
+ CHECK(method == WellKnownClasses::java_lang_String_charAt);
return "<native>";
} else {
CodeItemInstructionAccessor accessor = method->DexInstructions();
@@ -1154,12 +1061,6 @@ static void DumpB74410240DebugData(ArtMethod** sp) REQUIRES_SHARED(Locks::mutato
(reinterpret_cast<uint8_t*>(sp) + callee_return_pc_offset));
ArtMethod* outer_method = *caller_sp;
- if (UNLIKELY(caller_pc == reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()))) {
- LOG(FATAL_WITHOUT_ABORT) << "Method: " << outer_method->PrettyMethod()
- << " native pc: " << caller_pc << " Instrumented!";
- return;
- }
-
const OatQuickMethodHeader* current_code = outer_method->GetOatQuickMethodHeader(caller_pc);
CHECK(current_code != nullptr);
CHECK(current_code->IsOptimized());
@@ -1193,7 +1094,7 @@ static void DumpB74410240DebugData(ArtMethod** sp) REQUIRES_SHARED(Locks::mutato
if (dex_pc == static_cast<uint32_t>(-1)) {
tag = "special ";
CHECK(inline_info.Equals(inline_infos.back()));
- caller = jni::DecodeArtMethod(WellKnownClasses::java_lang_String_charAt);
+ caller = WellKnownClasses::java_lang_String_charAt;
CHECK_EQ(caller->GetDexMethodIndex(), method_index);
} else {
ObjPtr<mirror::DexCache> dex_cache = caller->GetDexCache();
@@ -1234,11 +1135,11 @@ extern "C" const void* artQuickResolutionTrampoline(
const bool called_method_known_on_entry = !called->IsRuntimeMethod();
ArtMethod* caller = nullptr;
if (!called_method_known_on_entry) {
- caller = QuickArgumentVisitor::GetCallingMethod(sp);
+ uint32_t dex_pc;
+ caller = QuickArgumentVisitor::GetCallingMethodAndDexPc(sp, &dex_pc);
called_method.dex_file = caller->GetDexFile();
{
- uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp);
CodeItemInstructionAccessor accessor(caller->DexInstructions());
CHECK_LT(dex_pc, accessor.InsnsSizeInCodeUnits());
const Instruction& instr = accessor.InstructionAt(dex_pc);
@@ -1371,21 +1272,25 @@ extern "C" const void* artQuickResolutionTrampoline(
// Static invokes need class initialization check but instance invokes can proceed even if
// the class is erroneous, i.e. in the edge case of escaping instances of erroneous classes.
bool success = true;
- ObjPtr<mirror::Class> called_class = called->GetDeclaringClass();
- if (NeedsClinitCheckBeforeCall(called) && !called_class->IsVisiblyInitialized()) {
+ if (called->StillNeedsClinitCheck()) {
// Ensure that the called method's class is initialized.
StackHandleScope<1> hs(soa.Self());
- HandleWrapperObjPtr<mirror::Class> h_called_class(hs.NewHandleWrapper(&called_class));
+ Handle<mirror::Class> h_called_class = hs.NewHandle(called->GetDeclaringClass());
success = linker->EnsureInitialized(soa.Self(), h_called_class, true, true);
}
if (success) {
+ // When the clinit check is at the entry of AOT/nterp code, it is done before
+ // the suspend check. To ensure that code sees the latest version of the
+ // class (it skips the read barrier to reduce code size), do a suspend
+ // check now.
+ self->CheckSuspend();
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
// Check if we need instrumented code here. Since resolution stubs could suspend, it is
// possible that we instrumented the entry points after we started executing the resolution
// stub.
code = instrumentation->GetMaybeInstrumentedCodeForInvoke(called);
} else {
- DCHECK(called_class->IsErroneous());
+ DCHECK(called->GetDeclaringClass()->IsErroneous());
DCHECK(self->IsExceptionPending());
}
}
@@ -1442,10 +1347,10 @@ template<class T> class BuildNativeCallFrameStateMachine {
static constexpr size_t kRegistersNeededForLong = 2;
static constexpr size_t kRegistersNeededForDouble = 2;
static constexpr bool kMultiRegistersAligned = true;
- static constexpr bool kMultiFPRegistersWidened = false;
static constexpr bool kMultiGPRegistersWidened = false;
static constexpr bool kAlignLongOnStack = true;
static constexpr bool kAlignDoubleOnStack = true;
+ static constexpr bool kNaNBoxing = false;
#elif defined(__aarch64__)
static constexpr bool kNativeSoftFloatAbi = false; // This is a hard float ABI.
static constexpr size_t kNumNativeGprArgs = 8; // 8 arguments passed in GPRs.
@@ -1454,10 +1359,22 @@ template<class T> class BuildNativeCallFrameStateMachine {
static constexpr size_t kRegistersNeededForLong = 1;
static constexpr size_t kRegistersNeededForDouble = 1;
static constexpr bool kMultiRegistersAligned = false;
- static constexpr bool kMultiFPRegistersWidened = false;
static constexpr bool kMultiGPRegistersWidened = false;
static constexpr bool kAlignLongOnStack = false;
static constexpr bool kAlignDoubleOnStack = false;
+ static constexpr bool kNaNBoxing = false;
+#elif defined(__riscv)
+ static constexpr bool kNativeSoftFloatAbi = false;
+ static constexpr size_t kNumNativeGprArgs = 8;
+ static constexpr size_t kNumNativeFprArgs = 8;
+
+ static constexpr size_t kRegistersNeededForLong = 1;
+ static constexpr size_t kRegistersNeededForDouble = 1;
+ static constexpr bool kMultiRegistersAligned = false;
+ static constexpr bool kMultiGPRegistersWidened = true;
+ static constexpr bool kAlignLongOnStack = false;
+ static constexpr bool kAlignDoubleOnStack = false;
+ static constexpr bool kNaNBoxing = true;
#elif defined(__i386__)
static constexpr bool kNativeSoftFloatAbi = false; // Not using int registers for fp
static constexpr size_t kNumNativeGprArgs = 0; // 0 arguments passed in GPRs.
@@ -1466,10 +1383,10 @@ template<class T> class BuildNativeCallFrameStateMachine {
static constexpr size_t kRegistersNeededForLong = 2;
static constexpr size_t kRegistersNeededForDouble = 2;
static constexpr bool kMultiRegistersAligned = false; // x86 not using regs, anyways
- static constexpr bool kMultiFPRegistersWidened = false;
static constexpr bool kMultiGPRegistersWidened = false;
static constexpr bool kAlignLongOnStack = false;
static constexpr bool kAlignDoubleOnStack = false;
+ static constexpr bool kNaNBoxing = false;
#elif defined(__x86_64__)
static constexpr bool kNativeSoftFloatAbi = false; // This is a hard float ABI.
static constexpr size_t kNumNativeGprArgs = 6; // 6 arguments passed in GPRs.
@@ -1478,10 +1395,10 @@ template<class T> class BuildNativeCallFrameStateMachine {
static constexpr size_t kRegistersNeededForLong = 1;
static constexpr size_t kRegistersNeededForDouble = 1;
static constexpr bool kMultiRegistersAligned = false;
- static constexpr bool kMultiFPRegistersWidened = false;
static constexpr bool kMultiGPRegistersWidened = false;
static constexpr bool kAlignLongOnStack = false;
static constexpr bool kAlignDoubleOnStack = false;
+ static constexpr bool kNaNBoxing = false;
#else
#error "Unsupported architecture"
#endif
@@ -1597,8 +1514,10 @@ template<class T> class BuildNativeCallFrameStateMachine {
if (HaveFloatFpr()) {
fpr_index_--;
if (kRegistersNeededForDouble == 1) {
- if (kMultiFPRegistersWidened) {
- PushFpr8(bit_cast<uint64_t, double>(val));
+ if (kNaNBoxing) {
+ // NaN boxing: no widening, just use the bits, but set the upper 32 bits to all ones.
+ // See e.g. the RISC-V manual, D extension, section "NaN Boxing of Narrower Values".
+ PushFpr8(0xFFFFFFFF00000000lu | static_cast<uint64_t>(bit_cast<uint32_t, float>(val)));
} else {
// No widening, just use the bits.
PushFpr8(static_cast<uint64_t>(bit_cast<uint32_t, float>(val)));
@@ -1608,14 +1527,7 @@ template<class T> class BuildNativeCallFrameStateMachine {
}
} else {
stack_entries_++;
- if (kRegistersNeededForDouble == 1 && kMultiFPRegistersWidened) {
- // Need to widen before storing: Note the "double" in the template instantiation.
- // Note: We need to jump through those hoops to make the compiler happy.
- DCHECK_EQ(sizeof(uintptr_t), sizeof(uint64_t));
- PushStack(static_cast<uintptr_t>(bit_cast<uint64_t, double>(val)));
- } else {
- PushStack(static_cast<uintptr_t>(bit_cast<uint32_t, float>(val)));
- }
+ PushStack(static_cast<uintptr_t>(bit_cast<uint32_t, float>(val)));
fpr_index_ = 0;
}
}
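
The kNaNBoxing path added above ORs the float bits into the low half of an FPR slot and sets
the upper 32 bits to all ones. A small self-contained illustration of the resulting bit
pattern, using a hypothetical NanBoxFloat helper rather than ART's PushFpr8:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // NaN-box a 32-bit float into a 64-bit FPR slot: keep the float bits in the low half
    // and set the upper 32 bits to all ones, as the RISC-V D extension requires for
    // narrower values passed in double-width registers.
    static uint64_t NanBoxFloat(float value) {
      uint32_t bits;
      std::memcpy(&bits, &value, sizeof(bits));  // equivalent of bit_cast<uint32_t, float>
      return UINT64_C(0xFFFFFFFF00000000) | bits;
    }

    int main() {
      // 1.0f is 0x3f800000, so this prints ffffffff3f800000.
      std::printf("%016llx\n", static_cast<unsigned long long>(NanBoxFloat(1.0f)));
      return 0;
    }
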
@@ -1795,7 +1707,7 @@ class ComputeGenericJniFrameSize final : public ComputeNativeCallFrameSize {
// Add space for cookie.
DCHECK_ALIGNED(managed_sp, sizeof(uintptr_t));
- static_assert(sizeof(uintptr_t) >= sizeof(IRTSegmentState));
+ static_assert(sizeof(uintptr_t) >= sizeof(jni::LRTSegmentState));
uint8_t* sp8 = reinterpret_cast<uint8_t*>(managed_sp) - sizeof(uintptr_t);
// Layout stack arguments.
@@ -1903,10 +1815,10 @@ class BuildGenericJniFrameVisitor final : public QuickArgumentVisitor {
uint32_t shorty_len,
ArtMethod** managed_sp,
uintptr_t* reserved_area)
- : QuickArgumentVisitor(managed_sp, is_static, shorty, shorty_len),
- jni_call_(nullptr, nullptr, nullptr, critical_native),
- sm_(&jni_call_),
- current_vreg_(nullptr) {
+ : QuickArgumentVisitor(managed_sp, is_static, shorty, shorty_len),
+ jni_call_(nullptr, nullptr, nullptr),
+ sm_(&jni_call_),
+ current_vreg_(nullptr) {
DCHECK_ALIGNED(managed_sp, kStackAlignment);
DCHECK_ALIGNED(reserved_area, sizeof(uintptr_t));
@@ -1944,7 +1856,7 @@ class BuildGenericJniFrameVisitor final : public QuickArgumentVisitor {
// The declaring class must be marked.
auto* declaring_class = reinterpret_cast<mirror::CompressedReference<mirror::Class>*>(
method->GetDeclaringClassAddressWithoutBarrier());
- if (kUseReadBarrier) {
+ if (gUseReadBarrier) {
artJniReadBarrier(method);
}
sm_.AdvancePointer(declaring_class);
@@ -1955,33 +1867,8 @@ class BuildGenericJniFrameVisitor final : public QuickArgumentVisitor {
void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override;
private:
- // A class to fill a JNI call. Adds reference/handle-scope management to FillNativeCall.
- class FillJniCall final : public FillNativeCall {
- public:
- FillJniCall(uintptr_t* gpr_regs,
- uint32_t* fpr_regs,
- uintptr_t* stack_args,
- bool critical_native)
- : FillNativeCall(gpr_regs, fpr_regs, stack_args),
- cur_entry_(0),
- critical_native_(critical_native) {}
-
- void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) {
- FillNativeCall::Reset(gpr_regs, fpr_regs, stack_args);
- cur_entry_ = 0U;
- }
-
- bool CriticalNative() const {
- return critical_native_;
- }
-
- private:
- size_t cur_entry_;
- const bool critical_native_;
- };
-
- FillJniCall jni_call_;
- BuildNativeCallFrameStateMachine<FillJniCall> sm_;
+ FillNativeCall jni_call_;
+ BuildNativeCallFrameStateMachine<FillNativeCall> sm_;
// Pointer to the current vreg in caller's reserved out vreg area.
// Used for spilling reference arguments.
@@ -2091,7 +1978,7 @@ extern "C" const void* artQuickGenericJniTrampoline(Thread* self,
}
// Fix up managed-stack things in Thread. After this we can walk the stack.
- self->SetTopOfStackTagged(managed_sp);
+ self->SetTopOfStackGenericJniTagged(managed_sp);
self->VerifyStack();
@@ -2104,16 +1991,21 @@ extern "C" const void* artQuickGenericJniTrampoline(Thread* self,
// We can set the entrypoint of a native method to generic JNI even when the
// class hasn't been initialized, so we need to do the initialization check
// before invoking the native code.
- if (NeedsClinitCheckBeforeCall(called)) {
- ObjPtr<mirror::Class> declaring_class = called->GetDeclaringClass();
- if (UNLIKELY(!declaring_class->IsVisiblyInitialized())) {
- // Ensure static method's class is initialized.
- StackHandleScope<1> hs(self);
- Handle<mirror::Class> h_class(hs.NewHandle(declaring_class));
- if (!runtime->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
- DCHECK(Thread::Current()->IsExceptionPending()) << called->PrettyMethod();
- return nullptr; // Report error.
- }
+ if (called->StillNeedsClinitCheck()) {
+ // Ensure static method's class is initialized.
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> h_class = hs.NewHandle(called->GetDeclaringClass());
+ if (!runtime->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
+ DCHECK(Thread::Current()->IsExceptionPending()) << called->PrettyMethod();
+ return nullptr; // Report error.
+ }
+ }
+
+ instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
+ if (UNLIKELY(instr->HasMethodEntryListeners())) {
+ instr->MethodEnterEvent(self, called);
+ if (self->IsExceptionPending()) {
+ return nullptr;
}
}
@@ -2185,75 +2077,13 @@ extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self,
// anything that requires a mutator lock before that would cause problems as GC may have the
// exclusive mutator lock and may be moving objects, etc.
ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
- DCHECK(self->GetManagedStack()->GetTopQuickFrameTag());
+ DCHECK(self->GetManagedStack()->GetTopQuickFrameGenericJniTag());
uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
ArtMethod* called = *sp;
uint32_t cookie = *(sp32 - 1);
return GenericJniMethodEnd(self, cookie, result, result_f, called);
}
-// Fast path method resolution that can't throw exceptions.
-template <InvokeType type>
-inline ArtMethod* FindMethodFast(uint32_t method_idx,
- ObjPtr<mirror::Object> this_object,
- ArtMethod* referrer)
- REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!Roles::uninterruptible_) {
- ScopedAssertNoThreadSuspension ants(__FUNCTION__);
- if (UNLIKELY(this_object == nullptr && type != kStatic)) {
- return nullptr;
- }
- ObjPtr<mirror::Class> referring_class = referrer->GetDeclaringClass();
- ObjPtr<mirror::DexCache> dex_cache = referrer->GetDexCache();
- constexpr ClassLinker::ResolveMode resolve_mode = ClassLinker::ResolveMode::kCheckICCEAndIAE;
- ClassLinker* linker = Runtime::Current()->GetClassLinker();
- ArtMethod* resolved_method = linker->GetResolvedMethod<type, resolve_mode>(method_idx, referrer);
- if (UNLIKELY(resolved_method == nullptr)) {
- return nullptr;
- }
- if (type == kInterface) { // Most common form of slow path dispatch.
- return this_object->GetClass()->FindVirtualMethodForInterface(resolved_method,
- kRuntimePointerSize);
- }
- if (type == kStatic || type == kDirect) {
- return resolved_method;
- }
-
- if (type == kSuper) {
- // TODO This lookup is rather slow.
- dex::TypeIndex method_type_idx = dex_cache->GetDexFile()->GetMethodId(method_idx).class_idx_;
- ObjPtr<mirror::Class> method_reference_class = linker->LookupResolvedType(
- method_type_idx, dex_cache, referrer->GetClassLoader());
- if (method_reference_class == nullptr) {
- // Need to do full type resolution...
- return nullptr;
- }
-
- // If the referring class is in the class hierarchy of the
- // referenced class in the bytecode, we use its super class. Otherwise, we cannot
- // resolve the method.
- if (!method_reference_class->IsAssignableFrom(referring_class)) {
- return nullptr;
- }
-
- if (method_reference_class->IsInterface()) {
- return method_reference_class->FindVirtualMethodForInterfaceSuper(
- resolved_method, kRuntimePointerSize);
- }
-
- ObjPtr<mirror::Class> super_class = referring_class->GetSuperClass();
- if (resolved_method->GetMethodIndex() >= super_class->GetVTableLength()) {
- // The super class does not have the method.
- return nullptr;
- }
- return super_class->GetVTableEntry(resolved_method->GetMethodIndex(), kRuntimePointerSize);
- }
-
- DCHECK(type == kVirtual);
- return this_object->GetClass()->GetVTableEntry(
- resolved_method->GetMethodIndex(), kRuntimePointerSize);
-}
-
// We use TwoWordReturn to optimize scalar returns. We use the hi value for code, and the lo value
// for the method pointer.
//
@@ -2267,19 +2097,36 @@ static TwoWordReturn artInvokeCommon(uint32_t method_idx,
ArtMethod** sp) {
ScopedQuickEntrypointChecks sqec(self);
DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs));
- ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp);
- ArtMethod* method = FindMethodFast<type>(method_idx, this_object, caller_method);
+ uint32_t dex_pc;
+ ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethodAndDexPc(sp, &dex_pc);
+ CodeItemInstructionAccessor accessor(caller_method->DexInstructions());
+ DCHECK_LT(dex_pc, accessor.InsnsSizeInCodeUnits());
+ const Instruction& instr = accessor.InstructionAt(dex_pc);
+ bool string_init = false;
+ ArtMethod* method = FindMethodToCall<type>(
+ self, caller_method, &this_object, instr, /* only_lookup_tls_cache= */ true, &string_init);
+
if (UNLIKELY(method == nullptr)) {
+ if (self->IsExceptionPending()) {
+ // Return a failure if the first lookup threw an exception.
+ return GetTwoWordFailureValue(); // Failure.
+ }
const DexFile* dex_file = caller_method->GetDexFile();
uint32_t shorty_len;
const char* shorty = dex_file->GetMethodShorty(dex_file->GetMethodId(method_idx), &shorty_len);
{
- // Remember the args in case a GC happens in FindMethodFromCode.
+ // Remember the args in case a GC happens in FindMethodToCall.
ScopedObjectAccessUnchecked soa(self->GetJniEnv());
RememberForGcArgumentVisitor visitor(sp, type == kStatic, shorty, shorty_len, &soa);
visitor.VisitArguments();
- method = FindMethodFromCode<type, /*access_check=*/true>(
- method_idx, &this_object, caller_method, self);
+
+ method = FindMethodToCall<type>(self,
+ caller_method,
+ &this_object,
+ instr,
+ /* only_lookup_tls_cache= */ false,
+ &string_init);
+
visitor.FixupReferences();
}
@@ -2364,9 +2211,9 @@ extern "C" TwoWordReturn artInvokeInterfaceTrampoline(ArtMethod* interface_metho
// Fetch the dex_method_idx of the target interface method from the caller.
StackHandleScope<1> hs(self);
Handle<mirror::Object> this_object = hs.NewHandle(raw_this_object);
- ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp);
+ uint32_t dex_pc;
+ ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethodAndDexPc(sp, &dex_pc);
uint32_t dex_method_idx;
- uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp);
const Instruction& instr = caller_method->DexInstructions().InstructionAt(dex_pc);
Instruction::Code instr_code = instr.Opcode();
DCHECK(instr_code == Instruction::INVOKE_INTERFACE ||
@@ -2485,8 +2332,8 @@ extern "C" uint64_t artInvokePolymorphic(mirror::Object* raw_receiver, Thread* s
const char* old_cause = self->StartAssertNoThreadSuspension("Making stack arguments safe.");
// From the instruction, get the |callsite_shorty| and expose arguments on the stack to the GC.
- ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp);
- uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp);
+ uint32_t dex_pc;
+ ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethodAndDexPc(sp, &dex_pc);
const Instruction& inst = caller_method->DexInstructions().InstructionAt(dex_pc);
DCHECK(inst.Opcode() == Instruction::INVOKE_POLYMORPHIC ||
inst.Opcode() == Instruction::INVOKE_POLYMORPHIC_RANGE);
@@ -2527,10 +2374,9 @@ extern "C" uint64_t artInvokePolymorphic(mirror::Object* raw_receiver, Thread* s
const size_t num_vregs = is_range ? inst.VRegA_4rcc() : inst.VRegA_45cc();
const size_t first_arg = 0;
ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
- CREATE_SHADOW_FRAME(num_vregs, /* link= */ nullptr, resolved_method, dex_pc);
+ CREATE_SHADOW_FRAME(num_vregs, resolved_method, dex_pc);
ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
- ScopedStackedShadowFramePusher
- frame_pusher(self, shadow_frame, StackedShadowFrameType::kShadowFrameUnderConstruction);
+ ScopedStackedShadowFramePusher frame_pusher(self, shadow_frame);
BuildQuickShadowFrameVisitor shadow_frame_builder(sp,
kMethodIsStatic,
shorty,
@@ -2589,6 +2435,10 @@ extern "C" uint64_t artInvokePolymorphic(mirror::Object* raw_receiver, Thread* s
// Pop transition record.
self->PopManagedStackFragment(fragment);
+ bool is_ref = (shorty[0] == 'L');
+ Runtime::Current()->GetInstrumentation()->PushDeoptContextIfNeeded(
+ self, DeoptimizationMethodType::kDefault, is_ref, result);
+
return result.GetJ();
}
@@ -2609,8 +2459,8 @@ extern "C" uint64_t artInvokeCustom(uint32_t call_site_idx, Thread* self, ArtMet
const char* old_cause = self->StartAssertNoThreadSuspension("Making stack arguments safe.");
// From the instruction, get the |callsite_shorty| and expose arguments on the stack to the GC.
- ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp);
- uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp);
+ uint32_t dex_pc;
+ ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethodAndDexPc(sp, &dex_pc);
const DexFile* dex_file = caller_method->GetDexFile();
const dex::ProtoIndex proto_idx(dex_file->GetProtoIndexForCallSite(call_site_idx));
const char* shorty = caller_method->GetDexFile()->GetShorty(proto_idx);
@@ -2620,10 +2470,9 @@ extern "C" uint64_t artInvokeCustom(uint32_t call_site_idx, Thread* self, ArtMet
const size_t first_arg = 0;
const size_t num_vregs = ArtMethod::NumArgRegisters(shorty);
ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
- CREATE_SHADOW_FRAME(num_vregs, /* link= */ nullptr, caller_method, dex_pc);
+ CREATE_SHADOW_FRAME(num_vregs, caller_method, dex_pc);
ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
- ScopedStackedShadowFramePusher
- frame_pusher(self, shadow_frame, StackedShadowFrameType::kShadowFrameUnderConstruction);
+ ScopedStackedShadowFramePusher frame_pusher(self, shadow_frame);
BuildQuickShadowFrameVisitor shadow_frame_builder(sp,
kMethodIsStatic,
shorty,
@@ -2647,40 +2496,61 @@ extern "C" uint64_t artInvokeCustom(uint32_t call_site_idx, Thread* self, ArtMet
// Pop transition record.
self->PopManagedStackFragment(fragment);
+ bool is_ref = (shorty[0] == 'L');
+ Runtime::Current()->GetInstrumentation()->PushDeoptContextIfNeeded(
+ self, DeoptimizationMethodType::kDefault, is_ref, result);
+
return result.GetJ();
}
-extern "C" void artMethodEntryHook(ArtMethod* method, Thread* self, ArtMethod** sp ATTRIBUTE_UNUSED)
+extern "C" void artJniMethodEntryHook(Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
+ ArtMethod* method = *self->GetManagedStack()->GetTopQuickFrame();
instr->MethodEnterEvent(self, method);
- if (instr->IsDeoptimized(method)) {
- // Instrumentation can request deoptimizing only a particular method (for
- // ex: when there are break points on the method). In such cases deoptimize
- // only this method. FullFrame deoptimizations are handled on method exits.
- artDeoptimizeFromCompiledCode(DeoptimizationKind::kDebugging, self);
- }
}
-extern "C" int artMethodExitHook(Thread* self,
- ArtMethod* method,
- uint64_t* gpr_result,
- uint64_t* fpr_result)
+extern "C" void artMethodEntryHook(ArtMethod* method, Thread* self, ArtMethod** sp)
REQUIRES_SHARED(Locks::mutator_lock_) {
+ instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
+ if (instr->HasMethodEntryListeners()) {
+ instr->MethodEnterEvent(self, method);
+ // The MethodEnter callback could have requested a deopt, for example by setting a
+ // breakpoint, so check if we need a deopt here.
+ if (instr->ShouldDeoptimizeCaller(self, sp) || instr->IsDeoptimized(method)) {
+ // Instrumentation can request deoptimizing only a particular method (for example, when
+ // there are breakpoints on the method). In such cases deoptimize only this method.
+ // FullFrame deoptimizations are handled on method exits.
+ artDeoptimizeFromCompiledCode(DeoptimizationKind::kDebugging, self);
+ }
+ } else {
+ DCHECK(!instr->IsDeoptimized(method));
+ }
+}
+
+extern "C" void artMethodExitHook(Thread* self,
+ ArtMethod** sp,
+ uint64_t* gpr_result,
+ uint64_t* fpr_result,
+ uint32_t frame_size)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK_EQ(reinterpret_cast<uintptr_t>(self), reinterpret_cast<uintptr_t>(Thread::Current()));
- CHECK(gpr_result != nullptr);
- CHECK(fpr_result != nullptr);
// Instrumentation exit stub must not be entered with a pending exception.
CHECK(!self->IsExceptionPending())
<< "Enter instrumentation exit stub with pending exception " << self->GetException()->Dump();
instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
- DCHECK(instr->AreExitStubsInstalled());
- bool is_ref;
- JValue return_value = instr->GetReturnValue(self, method, &is_ref, gpr_result, fpr_result);
- bool deoptimize = false;
- {
+ DCHECK(instr->RunExitHooks());
+
+ bool is_ref = false;
+ ArtMethod* method = *sp;
+ if (instr->HasMethodExitListeners()) {
StackHandleScope<1> hs(self);
+
+ CHECK(gpr_result != nullptr);
+ CHECK(fpr_result != nullptr);
+
+ JValue return_value = instr->GetReturnValue(method, &is_ref, gpr_result, fpr_result);
MutableHandle<mirror::Object> res(hs.NewHandle<mirror::Object>(nullptr));
if (is_ref) {
// Take a handle to the return value so we won't lose it if we suspend.
@@ -2688,20 +2558,10 @@ extern "C" int artMethodExitHook(Thread* self,
}
DCHECK(!method->IsRuntimeMethod());
- // Deoptimize if the caller needs to continue execution in the interpreter. Do nothing if we get
- // back to an upcall.
- NthCallerVisitor visitor(self, 1, /*include_runtime_and_upcalls=*/false);
- visitor.WalkStack(true);
- deoptimize = instr->ShouldDeoptimizeMethod(self, visitor);
-
// If we need a deoptimization MethodExitEvent will be called by the interpreter when it
- // re-executes the return instruction.
- if (!deoptimize) {
- instr->MethodExitEvent(self,
- method,
- /* frame= */ {},
- return_value);
- }
+ // re-executes the return instruction. For native methods we have to process method exit
+ // events here since deoptimization just removes the native frame.
+ instr->MethodExitEvent(self, method, /* frame= */ {}, return_value);
if (is_ref) {
// Restore the return value if it's a reference since it might have moved.
@@ -2711,17 +2571,27 @@ extern "C" int artMethodExitHook(Thread* self,
}
if (self->IsExceptionPending() || self->ObserveAsyncException()) {
- return 1;
+ // The exception was thrown from the method exit callback. We should not call method unwind
+ // callbacks for this case.
+ self->QuickDeliverException(/* is_method_exit_exception= */ true);
+ UNREACHABLE();
}
+ // We should deoptimize here if the caller requires a deoptimization or if the current method
+ // needs a deoptimization. We may need deoptimization for the current method if method exit
+ // hooks requested this frame to be popped. IsForcedInterpreterNeededForUpcall checks for that.
+ const bool deoptimize = instr->ShouldDeoptimizeCaller(self, sp, frame_size) ||
+ Dbg::IsForcedInterpreterNeededForUpcall(self, method);
if (deoptimize) {
+ JValue ret_val = instr->GetReturnValue(method, &is_ref, gpr_result, fpr_result);
DeoptimizationMethodType deopt_method_type = instr->GetDeoptimizationMethodType(method);
- self->PushDeoptimizationContext(return_value, is_ref, nullptr, false, deopt_method_type);
- artDeoptimize(self);
+ self->PushDeoptimizationContext(
+ ret_val, is_ref, self->GetException(), false, deopt_method_type);
+ // Method exit callback has already been run for this method. So tell the deoptimizer to skip
+ // callbacks for this frame.
+ artDeoptimize(self, /* skip_method_exit_callbacks= */ true);
UNREACHABLE();
}
-
- return 0;
}
} // namespace art
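
Several hunks above fold the separate GetCallingDexPc stack-map walk into
GetCallingMethodAndDexPc and keep GetCallingMethod as a thin wrapper, so call sites that need
both values decode the caller's frame only once. A minimal sketch of that wrapper shape, with
placeholder types instead of ART's OatQuickMethodHeader decoding:

    #include <cstdint>

    struct Method {};                                   // placeholder for ArtMethod
    struct Frame { Method* caller; uint32_t dex_pc; };  // placeholder for the decoded caller frame

    // Combined helper: return the caller and its dex pc in one pass.
    static Method* GetCallingMethodAndDexPc(const Frame& frame, uint32_t* dex_pc) {
      *dex_pc = frame.dex_pc;  // the real helper decodes this from the caller's stack map
      return frame.caller;
    }

    // Thin wrapper kept for call sites that only need the caller.
    static Method* GetCallingMethod(const Frame& frame) {
      uint32_t unused_dex_pc;
      return GetCallingMethodAndDexPc(frame, &unused_dex_pc);
    }

    int main() {
      Method m;
      Frame f{&m, 42u};
      uint32_t dex_pc;
      return (GetCallingMethodAndDexPc(f, &dex_pc) == GetCallingMethod(f) && dex_pc == 42u) ? 0 : 1;
    }
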
diff --git a/runtime/entrypoints/runtime_asm_entrypoints.h b/runtime/entrypoints/runtime_asm_entrypoints.h
index c4e62e5b87..951398dc04 100644
--- a/runtime/entrypoints/runtime_asm_entrypoints.h
+++ b/runtime/entrypoints/runtime_asm_entrypoints.h
@@ -79,21 +79,9 @@ static inline const void* GetQuickDeoptimizationEntryPoint() {
return reinterpret_cast<const void*>(art_quick_deoptimize);
}
-// Return address of instrumentation entry point used by non-interpreter based tracing.
-extern "C" void art_quick_instrumentation_entry(void*);
-static inline const void* GetQuickInstrumentationEntryPoint() {
- return reinterpret_cast<const void*>(art_quick_instrumentation_entry);
-}
-
// Stub to deoptimize from compiled code.
extern "C" void art_quick_deoptimize_from_compiled_code(DeoptimizationKind);
-// The return_pc of instrumentation exit stub.
-extern "C" void art_quick_instrumentation_exit();
-static inline const void* GetQuickInstrumentationExitPc() {
- return reinterpret_cast<const void*>(art_quick_instrumentation_exit);
-}
-
extern "C" void* art_quick_string_builder_append(uint32_t format);
extern "C" void art_quick_compile_optimized(ArtMethod*, Thread*);
extern "C" void art_quick_method_entry_hook(ArtMethod*, Thread*);